repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
sanketloke/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# Gallery layout: n_components component images on an n_row x n_col grid.
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)  # Olivetti faces are 64x64 pixels, stored flattened
rng = RandomState(0)  # fixed seed for reproducible shuffling / estimators

###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape

# global centering: remove the mean face across all samples
faces_centered = faces - faces.mean(axis=0)

# local centering: remove each sample's own mean intensity
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    """Show ``images`` as a titled gallery of ``n_row`` x ``n_col`` tiles."""
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for index, component in enumerate(images, start=1):
        plt.subplot(n_row, n_col, index)
        # Symmetric color limits so positive and negative weights render
        # with comparable intensity around mid-gray.
        limit = max(component.max(), -component.min())
        plt.imshow(component.reshape(image_shape),
                   cmap=plt.cm.gray, interpolation='nearest',
                   vmin=-limit, vmax=limit)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
# Each entry is a (display name, estimator instance, center flag) triple;
# the flag selects the globally/locally centered data for fitting.
estimators = [
    ('Eigenfaces - RandomizedPCA',
     decomposition.RandomizedPCA(n_components=n_components, whiten=True),
     True),
    ('Non-negative components - NMF',
     # NMF requires non-negative input, so it fits the raw (uncentered) faces.
     decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
     False),
    ('Independent components - FastICA',
     decomposition.FastICA(n_components=n_components, whiten=True),
     True),
    ('Sparse comp. - MiniBatchSparsePCA',
     decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
                                      n_iter=100, batch_size=3,
                                      random_state=rng),
     True),
    ('MiniBatchDictionaryLearning',
     decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
                                               n_iter=50, batch_size=3,
                                               random_state=rng),
     True),
    ('Cluster centers - MiniBatchKMeans',
     MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
                     max_iter=50, random_state=rng),
     True),
    ('Factor Analysis components - FA',
     decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
     True),
]

###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
# Fit every estimator, time it, and plot the learned components.
for name, estimator, center in estimators:
    print("Extracting the top %d %s..." % (n_components, name))
    t0 = time()
    data = faces
    if center:
        data = faces_centered
    estimator.fit(data)
    train_time = (time() - t0)
    print("done in %0.3fs" % train_time)
    # Clustering estimators expose cluster_centers_ instead of components_.
    if hasattr(estimator, 'cluster_centers_'):
        components_ = estimator.cluster_centers_
    else:
        components_ = estimator.components_
    # Factor Analysis also provides a per-pixel noise variance worth showing.
    if hasattr(estimator, 'noise_variance_'):
        plot_gallery("Pixelwise variance",
                     estimator.noise_variance_.reshape(1, -1), n_col=1,
                     n_row=1)
    plot_gallery('%s - Train time %.1fs' % (name, train_time),
                 components_[:n_components])

plt.show()
| bsd-3-clause |
devanshdalal/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
# Fix the RNG seed so the example is reproducible.
np.random.seed(1)
def f(x):
    """Ground-truth target to predict: ``x * sin(x)``."""
    sine_term = np.sin(x)
    return x * sine_term
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)

# Observations: true function plus heteroscedastic Gaussian noise
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

# alpha is the quantile level: 0.95 gives the upper bound of the interval.
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (95% quantile -> upper bound)
y_upper = clf.predict(xx)

# Refit for the symmetric lower quantile (1 - 0.95 = 0.05).
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (5% quantile -> lower bound)
y_lower = clf.predict(xx)

# Refit with least-squares loss for the central (mean) prediction.
clf.set_params(loss='ls')
clf.fit(X, y)

# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE.
fig = plt.figure()
# Use a raw string for the LaTeX label: '\,' and '\s' are invalid Python
# escape sequences and emit DeprecationWarning in non-raw literals; the
# rendered text is byte-identical.
plt.plot(xx, f(xx), 'g:', label=r'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
# Shade the band between the 5% and 95% quantile predictions.
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
wwjiang007/flink | flink-python/pyflink/table/tests/test_pandas_udaf.py | 5 | 37026 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import expressions as expr
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
class BatchPandasUDAFITTests(PyFlinkBatchTableTestCase):
    """Integration tests for Pandas UDAFs on the batch Table API.

    NOTE(review): expected results are exact string renderings of sink rows;
    they depend on the TestAppendSink formatting — verify against the sink
    utility if these assertions drift.
    """

    def test_check_result_type(self):
        """Declaring a Pandas UDAF with a MAP result type must raise."""
        def pandas_udaf():
            pass

        with self.assertRaises(
                TypeError,
                msg="Invalid returnType: Pandas UDAF doesn't support DataType type MAP currently"):
            udaf(pandas_udaf, result_type=DataTypes.MAP(DataTypes.INT(), DataTypes.INT()),
                 func_type="pandas")

    def test_group_aggregate_function(self):
        """Mix general and pandas scalar UDFs with pandas UDAFs in a group-by."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(),
             DataTypes.ROW(
                 [DataTypes.FIELD("a", DataTypes.INT()),
                  DataTypes.FIELD("b", DataTypes.INT())])])
        self.t_env.register_table_sink("Results", table_sink)
        # general udf
        add = udf(lambda a: a + 1, result_type=DataTypes.INT())
        # pandas udf
        substract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
        # pandas UDAF returning a ROW of (max, min)
        max_udaf = udaf(lambda a: (a.max(), a.min()),
                        result_type=DataTypes.ROW(
                            [DataTypes.FIELD("a", DataTypes.INT()),
                             DataTypes.FIELD("b", DataTypes.INT())]),
                        func_type="pandas")
        t.group_by("a") \
            .select(t.a, mean_udaf(add(t.b)), max_udaf(substract(t.c))) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(
            actual,
            ["+I[1, 6.0, +I[5, 2]]", "+I[2, 3.0, +I[3, 2]]", "+I[3, 3.0, +I[2, 2]]"])

    def test_group_aggregate_without_keys(self):
        """A multi-argument pandas UDAF aggregating the whole table."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a'],
            [DataTypes.INT()])
        min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
                       result_type=DataTypes.INT(), func_type="pandas")
        self.t_env.register_table_sink("Results", table_sink)
        t.select(min_add(t.a, t.b, t.c)) \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[5]"])

    def test_group_aggregate_with_aux_group(self):
        """Pandas UDAFs grouped by auxiliary keys derived in a prior select."""
        t = self.t_env.from_elements(
            [(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT())]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.TINYINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.INT()])
        self.t_env.register_table_sink("Results", table_sink)
        # also exercise the metric-reporting code path of MaxAdd.open()
        self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        t.group_by("a") \
            .select("a, a + 1 as b, a + 2 as c") \
            .group_by("a, b") \
            .select("a, b, mean_udaf(b), max_add(b, c, 1)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2, 2.0, 6]", "+I[2, 3, 3.0, 8]", "+I[3, 4, 4.0, 10]"])

    def test_tumble_group_window_aggregate_function(self):
        """mean_udaf over 1-hour tumbling event-time windows."""
        import datetime
        from pyflink.table.window import Tumble
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()
            ])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        tumble_window = Tumble.over(expr.lit(1).hours) \
            .on(expr.col("rowtime")) \
            .alias("w")
        t.window(tumble_window) \
            .group_by("w") \
            .select("w.start, w.end, mean_udaf(b)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.2]",
                            "+I[2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]"])

    def test_slide_group_window_aggregate_function(self):
        """mean_udaf and MaxAdd over 1h/30min sliding event-time windows."""
        import datetime
        from pyflink.table.window import Slide
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT(),
                DataTypes.INT()
            ])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        slide_window = Slide.over(expr.lit(1).hours) \
            .every(expr.lit(30).minutes) \
            .on(expr.col("rowtime")) \
            .alias("w")
        t.window(slide_window) \
            .group_by("a, w") \
            .select("a, w.start, w.end, mean_udaf(b), max_add(b, c, 1)") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 6]",
                            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5, 7]",
                            "+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5, 14]",
                            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0, 14]",
                            "+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0, 4]",
                            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 10]",
                            "+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0, 10]",
                            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 7]",
                            "+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 7]"])

    def test_over_window_aggregate_function(self):
        """Pandas UDAFs in SQL over-windows (ROWS and RANGE variants)."""
        import datetime
        t = self.t_env.from_elements(
            [
                (1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (3, 2, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 3, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
                (1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
                (2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
            ],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
            [DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT(), DataTypes.FLOAT(),
             DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(),
             DataTypes.FLOAT(), DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add", udaf(MaxAdd(),
                                                     result_type=DataTypes.INT(),
                                                     func_type="pandas"))
        self.t_env.register_table("T", t)
        self.t_env.execute_sql("""
            insert into Results
            select a,
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING),
                   max_add(b, c)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN UNBOUNDED preceding AND 0 FOLLOWING),
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING),
                   mean_udaf(c)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING),
                   mean_udaf(c)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
                   mean_udaf(c)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
                   mean_udaf(c)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
            from T
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 4.3333335, 5, 4.3333335, 3.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
                            "+I[1, 4.3333335, 13, 5.5, 3.0, 3.0, 4.3333335, 8.0, 5.0, 5.0]",
                            "+I[1, 4.3333335, 6, 4.3333335, 2.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
                            "+I[2, 2.0, 9, 2.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0]",
                            "+I[2, 2.0, 3, 2.0, 2.0, 4.0, 1.0, 2.0, 4.0, 2.0]",
                            "+I[3, 2.0, 3, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]"])
class StreamPandasUDAFITTests(PyFlinkStreamTableTestCase):
    """Integration tests for Pandas UDAFs on the streaming Table API.

    Most tests write a small CSV fixture, declare a filesystem source over it,
    aggregate with pandas UDAFs, and compare the sink's rendered rows.
    """

    def test_sliding_group_window_over_time(self):
        """mean_udaf over 1h/30min sliding event-time windows."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Slide
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Slide.over("1.hours").every("30.minutes").on("rowtime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, w.start, w.end, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]",
                            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5]",
                            "+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5]",
                            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]",
                            "+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0]",
                            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
                            "+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0]",
                            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
                            "+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]"])
        os.remove(source_path)

    def test_sliding_group_window_over_proctime(self):
        """Sliding processing-time window; only sanity-checks the window start."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        from pyflink.table.window import Slide
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a INT,
                proctime as PROCTIME()
            ) with(
                'connector' = 'datagen',
                'rows-per-second' = '1',
                'fields.a.kind' = 'sequence',
                'fields.a.start' = '1',
                'fields.a.end' = '10'
            )
        """
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        iterator = t.select("a, proctime") \
            .window(Slide.over("1.seconds").every("1.seconds").on("proctime").alias("w")) \
            .group_by("a, w") \
            .select("mean_udaf(a) as b, w.start").execute().collect()
        result = [i for i in iterator]
        # if the WindowAssigner.isEventTime() does not return false,
        # the w.start would be 1970-01-01
        # TODO: After fixing the TimeZone problem of window with processing time (will be fixed in
        # FLIP-162), we should replace it with a more accurate assertion.
        self.assertTrue(result[0][1].year > 1970)

    def test_sliding_group_window_over_count(self):
        """Sliding count window (2 rows, sliding by 1) over processing time."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00',
            '3,3,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Slide
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                protime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Slide.over("2.rows").every("1.rows").on("protime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 5.5]", "+I[2, 2.0]", "+I[3, 2.5]"])
        os.remove(source_path)

    def test_tumbling_group_window_over_time(self):
        """Tumbling 1-hour event-time window including w.rowtime in the select."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00'
        ]
        source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Tumble
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e'],
            [
                DataTypes.TINYINT(),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.TIMESTAMP(3),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Tumble.over("1.hours").on("rowtime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, w.start, w.end, w.rowtime, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, [
            "+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.5]",
            "+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 2018-03-11 04:59:59.999, 8.0]",
            "+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
            "+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
        ])
        os.remove(source_path)

    def test_tumbling_group_window_over_count(self):
        """Tumbling count window (2 rows) over processing time."""
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2,2018-03-11 03:10:00',
            '3,3,2,2018-03-11 03:10:00',
            '2,2,1,2018-03-11 03:10:00',
            '1,1,3,2018-03-11 03:40:00',
            '1,1,8,2018-03-11 04:20:00',
            '2,2,3,2018-03-11 03:30:00',
            '3,3,3,2018-03-11 03:30:00',
            '1,1,4,2018-03-11 04:20:00',
        ]
        source_path = tmp_dir + '/test_group_window_aggregate_function_over_count.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        from pyflink.table.window import Tumble
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                c SMALLINT,
                protime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        t = self.t_env.from_path("source_table")
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'd'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT()])
        self.t_env.register_table_sink("Results", table_sink)
        t.window(Tumble.over("2.rows").on("protime").alias("w")) \
            .group_by("a, b, w") \
            .select("a, mean_udaf(c) as b") \
            .execute_insert("Results") \
            .wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 6.0]", "+I[2, 2.0]", "+I[3, 2.5]"])
        os.remove(source_path)

    def test_row_time_over_range_window_aggregate_function(self):
        """RANGE over-window (20-minute preceding) on event time."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_range_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW),
                   max_add_min_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 3.0, 6]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 8.0, 16]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_row_time_over_rows_window_aggregate_function(self):
        """ROWS over-window (1 preceding) on event time."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
                   max_add_min_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 1.0, 2]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 6.5, 13]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_proc_time_over_rows_window_aggregate_function(self):
        """ROWS over-window (1 preceding) on processing time."""
        # create source file path
        import tempfile
        import os
        tmp_dir = tempfile.gettempdir()
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "ProcessingTime")
        self.t_env.register_function("mean_udaf", mean_udaf)
        self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
        source_table = """
            create table source_table(
                a TINYINT,
                b SMALLINT,
                proctime as PROCTIME()
            ) with(
                'connector.type' = 'filesystem',
                'format.type' = 'csv',
                'connector.path' = '%s',
                'format.ignore-first-line' = 'false',
                'format.field-delimiter' = ','
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c'],
            [
                DataTypes.TINYINT(),
                DataTypes.FLOAT(),
                DataTypes.SMALLINT()])
        self.t_env.register_table_sink("Results", table_sink)
        self.t_env.execute_sql("""
            insert into Results
            select a,
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY proctime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
                   max_add_min_udaf(b)
                   over (PARTITION BY a ORDER BY proctime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """).wait()
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["+I[1, 1.0, 2]",
                            "+I[1, 3.0, 6]",
                            "+I[1, 6.5, 13]",
                            "+I[2, 1.0, 2]",
                            "+I[2, 2.0, 4]",
                            "+I[3, 2.0, 4]"])
        os.remove(source_path)

    def test_execute_over_aggregate_from_json_plan(self):
        """Over-aggregate with pandas UDAFs survives a JSON-plan round trip."""
        # create source file path
        tmp_dir = self.tempdir
        data = [
            '1,1,2013-01-01 03:10:00',
            '3,2,2013-01-01 03:10:00',
            '2,1,2013-01-01 03:10:00',
            '1,5,2013-01-01 03:10:00',
            '1,8,2013-01-01 04:20:00',
            '2,3,2013-01-01 03:30:00'
        ]
        source_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan.csv'
        sink_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan'
        with open(source_path, 'w') as fd:
            for ele in data:
                fd.write(ele + '\n')
        source_table = """
            CREATE TABLE source_table (
                a TINYINT,
                b SMALLINT,
                rowtime TIMESTAMP(3),
                WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
            ) WITH (
                'connector' = 'filesystem',
                'path' = '%s',
                'format' = 'csv'
            )
        """ % source_path
        self.t_env.execute_sql(source_table)
        self.t_env.execute_sql("""
            CREATE TABLE sink_table (
                a TINYINT,
                b FLOAT,
                c SMALLINT
            ) WITH (
                'connector' = 'filesystem',
                'path' = '%s',
                'format' = 'csv'
            )
        """ % sink_path)
        max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
                                result_type=DataTypes.SMALLINT(),
                                func_type='pandas')
        self.t_env.get_config().get_configuration().set_string(
            "pipeline.time-characteristic", "EventTime")
        self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
        self.t_env.create_temporary_system_function("max_add_min_udaf", max_add_min_udaf)
        # Compile the statement to a JSON plan and execute that plan directly.
        json_plan = self.t_env._j_tenv.getJsonPlan("""
            insert into sink_table
            select a,
                   mean_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
                   max_add_min_udaf(b)
                   over (PARTITION BY a ORDER BY rowtime
                   ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
            from source_table
        """)
        from py4j.java_gateway import get_method
        # 'await' is a Python keyword, so the Java method is fetched by name.
        get_method(self.t_env._j_tenv.executeJsonPlan(json_plan), "await")()
        import glob
        lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
        lines.sort()
        self.assertEqual(lines, ['1,1.0,2', '1,3.0,6', '1,6.5,13', '2,1.0,2', '2,2.0,4', '3,2.0,4'])
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
    # Pandas UDAF: *v* arrives as a pandas.Series; return its mean as FLOAT.
    return v.mean()
class MaxAdd(AggregateFunction, unittest.TestCase):
    """Pandas aggregate: appends the sum of per-argument maxima, returns the first.

    Also exercises the metric API: a counter in a nested metric group is
    incremented by 10 on every get_value call.  NOTE(review): subclassing
    unittest.TestCase presumably just exposes assertion helpers to tests
    poking at this object — confirm.
    """

    def open(self, function_context):
        # Create a counter inside a nested metric group and mirror it in a
        # plain attribute so tests can read the accumulated value directly.
        mg = function_context.get_metric_group()
        self.counter = mg.add_group("key", "value").counter("my_counter")
        self.counter_sum = 0

    def get_value(self, accumulator):
        # counter
        self.counter.inc(10)
        self.counter_sum += 10
        return accumulator[0]

    def create_accumulator(self):
        return []

    def accumulate(self, accumulator, *args):
        # Sum the max of each argument series and record it.
        result = 0
        for arg in args:
            result += arg.max()
        accumulator.append(result)
if __name__ == '__main__':
    import unittest

    try:
        # Emit JUnit-style XML reports when xmlrunner is available (CI runs).
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # Fall back to the default text runner locally.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
wmvanvliet/mne-python | mne/viz/backends/_utils.py | 8 | 2928 | # -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
import numpy as np
import collections.abc
from ...externals.decorator import decorator
VALID_3D_BACKENDS = (
'pyvista', # default 3d backend
'mayavi',
'notebook',
)
ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere',
'oct')
def _get_colormap_from_array(colormap=None, normalized_colormap=False,
                             default_colormap='coolwarm'):
    """Build a matplotlib colormap from a name or an array of colors.

    ``None`` selects *default_colormap* by name; a string is looked up by
    name; anything else is treated as an array of colors and wrapped in a
    ``ListedColormap``, rescaled from [0, 255] unless *normalized_colormap*
    indicates values are already in [0, 1].
    """
    from matplotlib import cm
    from matplotlib.colors import ListedColormap

    if colormap is None:
        return cm.get_cmap(default_colormap)
    if isinstance(colormap, str):
        return cm.get_cmap(colormap)
    if normalized_colormap:
        return ListedColormap(colormap)
    return ListedColormap(np.array(colormap) / 255.0)
def _check_color(color):
    """Validate a color given as a named string or RGB(A) values.

    Strings are converted to an RGB tuple.  Iterables must have a size that
    is a multiple of 3 or 4 and hold either ints in [0, 255] or float64
    values in [0, 1]; the iterable is returned unchanged when valid.
    """
    from matplotlib.colors import colorConverter

    if isinstance(color, str):
        return colorConverter.to_rgb(color)

    if not isinstance(color, collections.abc.Iterable):
        raise TypeError("Expected type is `str` or iterable but "
                        "{} was given.".format(type(color)))

    np_color = np.array(color)
    if np_color.size % 3 != 0 and np_color.size % 4 != 0:
        raise ValueError("The expected valid format is RGB or RGBA.")
    if np_color.dtype in (np.int64, np.int32):
        if (np_color < 0).any() or (np_color > 255).any():
            raise ValueError("Values out of range [0, 255].")
    elif np_color.dtype == np.float64:
        if (np_color < 0.0).any() or (np_color > 1.0).any():
            raise ValueError("Values out of range [0.0, 1.0].")
    else:
        raise TypeError("Expected data type is `np.int64`, `np.int32`, or "
                        "`np.float64` but {} was given."
                        .format(np_color.dtype))
    return color
def _alpha_blend_background(ctable, background_color):
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
return (use_table * alphas) + background_color * (1 - alphas)
@decorator
def run_once(fun, *args, **kwargs):
    """Run the function only once."""
    # The guard flag lives on the function object itself, so every later
    # call anywhere in the process is a no-op (implicitly returning None).
    if not hasattr(fun, "_has_run"):
        fun._has_run = True
        return fun(*args, **kwargs)
@run_once
def _init_qt_resources():
    # Register the bundled Qt icon resources; @run_once makes this
    # idempotent, presumably because re-registering is undesirable — confirm.
    from ...icons import resources
    resources.qInitResources()
@contextmanager
def _qt_disable_paint(widget):
paintEvent = widget.paintEvent
widget.paintEvent = lambda *args, **kwargs: None
try:
yield
finally:
widget.paintEvent = paintEvent
| bsd-3-clause |
aabadie/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 58 | 9948 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
    """Silence stdout and stderr within the context.

    Both streams are redirected to ``os.devnull`` for the duration of the
    block.  Restoration is done in a ``finally`` clause so the real streams
    come back even if the body raises — the original version skipped
    restoration on error, leaving sys.stdout/stderr pointing at a closed
    devnull handle.
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    with open(os.devnull, 'w') as devnull:
        sys.stdout = devnull
        sys.stderr = devnull
        try:
            yield
            devnull.flush()
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
    """Deterministic 1-D toy regression y = 3*x + c + N(0, 0.1**2).

    A handful of points are corrupted into outliers so ordinary least
    squares breaks while a robust estimator should not.  Returns
    ``(X, y, slope, offset)`` with X of shape (n_samples, 1).
    """
    rng = np.random.RandomState(0)
    slope = 3.
    offset, n_samples = (2., 50) if intercept else (0.1, 100)
    # Draw order matters for reproducibility: x first, then the noise.
    x = rng.normal(size=n_samples)
    noise = 0.1 * rng.normal(size=n_samples)
    y = slope * x + offset + noise
    # Corrupt a few points into outliers.
    if intercept:
        outliers = [(42, -2, 4), (43, -2.5, 8), (33, 2.5, 1), (49, 2.1, 2)]
    else:
        outliers = [(42, -2, 4), (43, -2.5, 8), (53, 2.5, 1), (60, 2.1, 2),
                    (72, 1.8, -7)]
    for idx, x_val, y_val in outliers:
        x[idx], y[idx] = x_val, y_val
    return x[:, np.newaxis], y, slope, offset
def gen_toy_problem_2d():
    """Deterministic 2-D toy regression y = X @ [5, 10] + 1 + N(0, 0.1**2).

    Ten percent of the targets are replaced by pure-noise outliers.
    Returns ``(X, y, coef, intercept)``.
    """
    rng = np.random.RandomState(0)
    n_samples = 100
    X = rng.normal(size=(n_samples, 2))
    coef = np.array([5., 10.])
    intercept = 1.
    noise = 0.1 * rng.normal(size=n_samples)
    y = X.dot(coef) + intercept + noise
    # Replace 10% of the targets with large noise.
    n_outliers = n_samples // 10
    bad_rows = rng.randint(0, n_samples, size=n_outliers)
    y[bad_rows] = 50 * rng.normal(size=n_outliers)
    return X, y, coef, intercept
def gen_toy_problem_4d():
    """Deterministic 4-D toy regression y = X @ [5, 10, 42, 7] + 1 + noise.

    Ten percent of the targets are replaced by pure-noise outliers.
    Returns ``(X, y, coef, intercept)`` with 10000 samples.
    """
    rng = np.random.RandomState(0)
    n_samples = 10000
    X = rng.normal(size=(n_samples, 4))
    coef = np.array([5., 10., 42., 7.])
    intercept = 1.
    noise = 0.1 * rng.normal(size=n_samples)
    y = X.dot(coef) + intercept + noise
    # Replace 10% of the targets with large noise.
    n_outliers = n_samples // 10
    bad_rows = rng.randint(0, n_samples, size=n_outliers)
    y[bad_rows] = 50 * rng.normal(size=n_outliers)
    return X, y, coef, intercept
def test_modweiszfeld_step_1d():
    """Modified Weiszfeld step in 1-D: fixed point and monotone approach."""
    X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check startvalue is element of X and solution
    median = 2.
    new_y = _modified_weiszfeld_step(X, median)
    assert_array_almost_equal(new_y, median)
    # Check startvalue is not the solution
    y = 2.5
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check startvalue is not the solution but element of X
    y = 3.
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check that a single vector is identity
    X = np.array([1., 2., 3.]).reshape(1, 3)
    y = X[0, ]
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_equal(y, new_y)


def test_modweiszfeld_step_2d():
    """Modified Weiszfeld step in 2-D: first two iterations and the fixed point."""
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    y = np.array([0.5, 0.5])
    # Check first two iterations
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
    new_y = _modified_weiszfeld_step(X, new_y)
    assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
    # Check fix point
    y = np.array([0.21132505, 0.78867497])
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, y)


def test_spatial_median_1d():
    """In 1-D the spatial median coincides with the ordinary median."""
    X = np.array([1., 2., 3.]).reshape(3, 1)
    true_median = 2.
    _, median = _spatial_median(X)
    assert_array_almost_equal(median, true_median)
    # Test larger problem and for exact solution in 1d case
    random_state = np.random.RandomState(0)
    X = random_state.randint(100, size=(1000, 1))
    true_median = np.median(X.ravel())
    _, median = _spatial_median(X)
    assert_array_equal(median, true_median)


def test_spatial_median_2d():
    """Spatial median solves the Fermat-Weber problem; warns on low max_iter."""
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def cost_func(y):
        # Sum of Euclidean distances from y to every row of X.
        dists = np.array([norm(x - y) for x in X])
        return np.sum(dists)

    # Check if median is solution of the Fermat-Weber location problem
    fermat_weber = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, fermat_weber)
    # Check when maximum iteration is exceeded a warning is emitted
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
    """Theil-Sen is robust to the 1-D outliers that break least squares."""
    X, y, w, c = gen_toy_problem_1d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert_greater(np.abs(lstq.coef_ - w), 0.9)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)


def test_theil_sen_1d_no_intercept():
    """Without an intercept the slope absorbs the offset; intercept_ is 0."""
    X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Check that Least Squares fails
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(fit_intercept=False,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w + c, 1)
    assert_almost_equal(theil_sen.intercept_, 0.)


def test_theil_sen_2d():
    """2-D toy problem solved with a capped subpopulation."""
    X, y, w, c = gen_toy_problem_2d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert_greater(norm(lstq.coef_ - w), 1.0)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(max_subpopulation=1e3,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)


def test_calc_breakdown_point():
    """Breakdown point approaches 1 - 1/sqrt(2) for huge sample counts."""
    bp = _breakdown_point(1e10, 2)
    assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)


@raises(ValueError)
def test_checksubparams_negative_subpopulation():
    """A negative max_subpopulation is rejected."""
    X, y, w, c = gen_toy_problem_1d()
    TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)


@raises(ValueError)
def test_checksubparams_too_few_subsamples():
    """A too-small n_subsamples is rejected."""
    X, y, w, c = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)


@raises(ValueError)
def test_checksubparams_too_many_subsamples():
    """n_subsamples larger than the number of samples (50) is rejected."""
    X, y, w, c = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)


@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
    """Subsamples smaller than the feature count are rejected when n < p."""
    random_state = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = random_state.normal(size=(n_samples, n_features))
    y = random_state.normal(size=n_samples)
    TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)


def test_subpopulation():
    """4-D problem solved with a randomly sampled subpopulation of 250."""
    X, y, w, c = gen_toy_problem_4d()
    theil_sen = TheilSenRegressor(max_subpopulation=250,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)


def test_subsamples():
    """With n_subsamples == n_samples the fit matches least squares exactly."""
    X, y, w, c = gen_toy_problem_4d()
    theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
                                  random_state=0).fit(X, y)
    lstq = LinearRegression().fit(X, y)
    # Check for exact the same results as Least Squares
    assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
    """verbose=True must not crash; output is silenced for the test run."""
    X, y, w, c = gen_toy_problem_1d()
    # Check that Theil-Sen can be verbose
    with no_stdout_stderr():
        TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
        TheilSenRegressor(verbose=True,
                          max_subpopulation=10,
                          random_state=0).fit(X, y)


def test_theil_sen_parallel():
    """n_jobs=-1 produces the same robust fit as the serial path."""
    X, y, w, c = gen_toy_problem_2d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert_greater(norm(lstq.coef_ - w), 1.0)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(n_jobs=-1,
                                  random_state=0,
                                  max_subpopulation=2e3).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)


def test_less_samples_than_features():
    """Underdetermined case: matches least squares or interpolates the data."""
    random_state = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = random_state.normal(size=(n_samples, n_features))
    y = random_state.normal(size=n_samples)
    # Check that Theil-Sen falls back to Least Squares if fit_intercept=False
    theil_sen = TheilSenRegressor(fit_intercept=False,
                                  random_state=0).fit(X, y)
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
    # Check fit_intercept=True case. This will not be equal to the Least
    # Squares solution since the intercept is calculated differently.
    theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
    y_pred = theil_sen.predict(X)
    assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
BhallaLab/moose | moose-examples/traub_2005/py/test_singlecomp.py | 2 | 7203 | # test_singlecomp.py ---
#
# Filename: test_singlecomp.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Tue Jul 17 21:01:14 2012 (+0530)
# Version:
# Last-Updated: Sun Jun 25 15:37:21 2017 (-0400)
# By: subha
# Update #: 320
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Test the ion channels with a single compartment.
#
#
# Change log:
#
# 2012-07-17 22:22:23 (+0530) Tested NaF2 and NaPF_SS against neuron
# test case.
#
#
# Code:
import os
os.environ['NUMPTHREADS'] = '1'
import uuid
import unittest
from datetime import datetime
import sys
sys.path.append('../../../python')
import numpy as np
from matplotlib import pyplot as plt
import moose
from testutils import *
from nachans import *
from kchans import *
from archan import *
from cachans import *
from capool import *
simdt = 0.25e-4
plotdt = 0.25e-4
simtime = 350e-3
erev = {
'K': -100e-3,
'Na': 50e-3,
'Ca': 125e-3,
'AR': -40e-3
}
channel_density = {
'NaF2': 1500.0,
'NaPF_SS': 1.5,
'KDR_FS': 1000.0,
'KC_FAST': 100.0,
'KA': 300.0,
'KM': 37.5,
'K2': 1.0,
'KAHP_SLOWER': 1.0,
'CaL': 5.0,
'CaT_A': 1.0,
'AR': 2.5
}
compartment_propeties = {
'length': 20e-6,
'diameter': 2e-6 * 7.5,
'initVm': -65e-3,
'Em': -65e-3,
'Rm': 5.0,
'Cm': 9e-3,
'Ra': 1.0,
'specific': True}
stimulus = [[100e-3, 50e-3, 3e-10], # delay[0], width[0], level[0]
[1e9, 0, 0]]
def create_compartment(path, length, diameter, initVm, Em, Rm, Cm, Ra,
                       specific=False):
    """Create a moose Compartment at *path* and set its passive properties.

    When *specific* is True, Rm/Cm/Ra are interpreted as specific values and
    converted using the compartment geometry; otherwise they are absolute.
    """
    comp = moose.Compartment(path)
    comp.length = length
    comp.diameter = diameter
    comp.initVm = initVm
    comp.Em = Em
    if specific:
        # Lateral surface area of the cylindrical compartment.
        surface_area = np.pi * length * diameter
        comp.Rm = Rm / surface_area
        comp.Cm = Cm * surface_area
        # Axial resistance: specific Ra * length / cross-sectional area.
        comp.Ra = 4.0 * Ra * length / (np.pi * diameter * diameter)
    else:
        comp.Rm = Rm
        comp.Cm = Cm
        comp.Ra = Ra
    return comp
def insert_channel(compartment, channeclass, gbar, density=False):
    """Copy the channel prototype into *compartment* and connect it.

    *gbar* is the absolute conductance, or a conductance density scaled by
    the compartment's lateral surface area when *density* is True.
    """
    channel = moose.copy(channeclass.prototype, compartment)[0]
    if not density:
        channel.Gbar = gbar
    else:
        channel.Gbar = gbar * np.pi * compartment.length * compartment.diameter
    moose.connect(channel, 'channel', compartment, 'channel')
    return channel


def insert_ca(compartment, phi, tau):
    """Insert a Ca pool and couple it to the Ca-dependent channels.

    KC*/KAHP* channels read the pool concentration; CaL feeds its current
    into the pool.  B is *phi* normalised by the compartment surface area.
    """
    ca = moose.copy(CaPool.prototype, compartment)[0]
    ca.B = phi / (np.pi * compartment.length * compartment.diameter)
    ca.tau = tau
    print(ca.path, ca.B, ca.tau)
    for chan in moose.wildcardFind('%s/#[TYPE=HHChannel]' % (compartment.path)):
        if chan.name.startswith('KC') or chan.name.startswith('KAHP'):
            moose.connect(ca, 'concOut', chan, 'concen')
        elif chan.name.startswith('CaL'):
            moose.connect(chan, 'IkOut', ca, 'current')
        else:
            # Channel neither reads nor feeds the Ca pool; skip the dump.
            continue
        moose.showfield(chan)
    return ca
class TestSingleComp(unittest.TestCase):
    """Single-compartment model carrying the full channel set.

    ``setUp`` builds the compartment, inserts every channel from
    ``channel_density`` plus a Ca pool, attaches recording tables and a
    pulse generator, and runs the simulation.  ``testDefault`` plots the
    MOOSE traces against saved NEURON reference data and writes the Vm/Ca
    traces to disk.
    """

    def setUp(self):
        # Unique container per test instance so repeated runs don't collide.
        self.testId = uuid.uuid4().int
        self.container = moose.Neutral('test%d' % (self.testId))
        self.model = moose.Neutral('%s/model' % (self.container.path))
        self.data = moose.Neutral('%s/data' % (self.container.path))
        self.soma = create_compartment('%s/soma' % (self.model.path),
                                       **compartment_propeties)
        self.tables = {}
        tab = moose.Table('%s/Vm' % (self.data.path))
        self.tables['Vm'] = tab
        moose.connect(tab, 'requestOut', self.soma, 'getVm')
        for channelname, conductance in list(channel_density.items()):
            # Channel classes are referenced by name in channel_density;
            # eval resolves them from the imported channel modules.
            chanclass = eval(channelname)
            channel = insert_channel(self.soma, chanclass, conductance, density=True)
            # Reversal potential depends on the channel family.
            if issubclass(chanclass, KChannel):
                channel.Ek = erev['K']
            elif issubclass(chanclass, NaChannel):
                channel.Ek = erev['Na']
            elif issubclass(chanclass, CaChannel):
                channel.Ek = erev['Ca']
            elif issubclass(chanclass, AR):
                channel.Ek = erev['AR']
            tab = moose.Table('%s/%s' % (self.data.path, channelname))
            moose.connect(tab, 'requestOut', channel, 'getGk')
            self.tables['Gk_' + channel.name] = tab
        archan = moose.HHChannel(self.soma.path + '/AR')
        archan.X = 0.0
        ca = insert_ca(self.soma, 2.6e7, 50e-3)
        tab = moose.Table('%s/Ca' % (self.data.path))
        self.tables['Ca'] = tab
        moose.connect(tab, 'requestOut', ca, 'getCa')
        # Current injection defined by the module-level `stimulus` table.
        self.pulsegen = moose.PulseGen('%s/inject' % (self.model.path))
        moose.connect(self.pulsegen, 'output', self.soma, 'injectMsg')
        tab = moose.Table('%s/injection' % (self.data.path))
        moose.connect(tab, 'requestOut', self.pulsegen, 'getOutputValue')
        self.tables['pulsegen'] = tab
        self.pulsegen.count = len(stimulus)
        for ii in range(len(stimulus)):
            self.pulsegen.delay[ii] = stimulus[ii][0]
            self.pulsegen.width[ii] = stimulus[ii][1]
            self.pulsegen.level[ii] = stimulus[ii][2]
        setup_clocks(simdt, plotdt)
        assign_clocks(self.model, self.data)
        moose.reinit()
        start = datetime.now()
        moose.start(simtime)
        end = datetime.now()
        delta = end - start
        print('Simulation of %g s finished in %g s' % (simtime, delta.seconds + delta.microseconds * 1e-6))

    def testDefault(self):
        """Plot Vm/Ca against the NEURON reference and save the traces."""
        vm_axis = plt.subplot(2, 1, 1)
        ca_axis = plt.subplot(2, 1, 2)
        try:
            fname = os.path.join(config.mydir, 'nrn', 'data', 'singlecomp_Vm.dat')
            nrndata = np.loadtxt(fname)
            vm_axis.plot(nrndata[:, 0], nrndata[:, 1], label='Vm (mV) - nrn')
            ca_axis.plot(nrndata[:, 0], nrndata[:, 2], label='Ca (mM) - nrn')
        except IOError as e:
            # Reference data is optional; continue with the MOOSE traces only.
            print(e)
        tseries = np.linspace(0, simtime, len(self.tables['Vm'].vector)) * 1e3
        # plotcount = len(channel_density) + 1
        # rows = int(np.sqrt(plotcount) + 0.5)
        # columns = int(plotcount * 1.0/rows + 0.5)
        # print plotcount, rows, columns
        # plt.subplot(rows, columns, 1)
        vm_axis.plot(tseries, self.tables['Vm'].vector * 1e3, label='Vm (mV) - moose')
        vm_axis.plot(tseries, self.tables['pulsegen'].vector * 1e12, label='inject (pA)')
        ca_axis.plot(tseries, self.tables['Ca'].vector, label='Ca (mM) - moose')
        vm_axis.legend()
        ca_axis.legend()
        # ii = 2
        # for key, value in self.tables.items():
        #     if key.startswith('Gk'):
        #         plt.subplot(rows, columns, ii)
        #         plt.plot(tseries, value.vector, label=key)
        #         ii += 1
        # plt.legend()
        plt.show()
        data = np.vstack((tseries * 1e-3,
                          self.tables['Vm'].vector,
                          self.tables['Ca'].vector))
        np.savetxt(os.path.join(config.data_dir, 'singlecomp_Vm.dat'),
                   np.transpose(data))
if __name__ == '__main__':
    # Run the single-compartment regression test under unittest.
    unittest.main()

#
# test_singlecomp.py ends here
| gpl-3.0 |
pompiduskus/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    """Stability path ranks the three strongest f_regression features on top."""
    # Check lasso stability path
    # Load diabetes data and add noisy features
    scaling = 0.3
    coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
                                                  random_state=42,
                                                  n_resampling=30)
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(scores_path, axis=1))[-3:])


def test_randomized_lasso():
    """RandomizedLasso scores, transform round-trip, and scaling validation."""
    # Check randomized lasso
    scaling = 0.3
    selection_threshold = 0.5

    # or with 1 alpha
    clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # or with many alphas
    clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # transform keeps only features scoring above the threshold;
    # inverse_transform pads back to the original shape.
    X_r = clf.transform(X)
    X_full = clf.inverse_transform(X_r)
    assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
    assert_equal(X_full.shape, X.shape)

    clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(feature_scores, X.shape[1] * [1.])

    # scaling outside (0, 1) must raise.
    clf = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, clf.fit, X, y)

    clf = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
    """Randomized logistic scores agree with the f_classif feature ranking."""
    # Check randomized sparse logistic regression
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # Keep two classes only (binary problem).
    X = X[y != 2]
    y = y[y != 2]

    F, _ = f_classif(X, y)

    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    X_orig = X.copy()
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(X, X_orig)   # fit does not modify X
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))

    # Multiple C values at once.
    clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=scaling,
                                       n_resampling=50, tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))


def test_randomized_logistic_sparse():
    """Sparse input yields exactly the same scores as dense input."""
    # Check randomized sparse logistic regression on sparse data
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    X = X[y != 2]
    y = y[y != 2]

    # center here because sparse matrices are usually not centered
    X, y, _, _, _ = center_data(X, y, True, True)

    X_sp = sparse.csr_matrix(X)

    F, _ = f_classif(X, y)

    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores = clf.fit(X, y).scores_

    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores_sp = clf.fit(X_sp, y).scores_
    assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
ellisonbg/altair | altair/vegalite/v2/examples/bar_chart_with_highlight.py | 1 | 1033 | """
Bar Chart with Highlight
------------------------
This example shows a Bar chart that highlights values beyond a threshold.
"""
# category: bar charts
import altair as alt
import pandas as pd
# Daily readings; day 9 exceeds the threshold of 300 (axis labelled
# 'PM2.5 Value' below).
data = pd.DataFrame({"Day": range(1, 16),
                     "Value": [54.8, 112.1, 63.6, 37.6, 79.7, 137.9, 120.1, 103.3,
                               394.8, 199.5, 72.3, 51.1, 112.0, 174.5, 130.5]})

data2 = pd.DataFrame([{"ThresholdValue": 300, "Threshold": "hazardous"}])

# Base bars for every day.
bar1 = alt.Chart(data).mark_bar().encode(
    x='Day:O',
    y='Value:Q'
)

# Red overlay covering only the portion of each bar above the threshold.
bar2 = alt.Chart(data).mark_bar(color="#e45755").encode(
    x='Day:O',
    y='baseline:Q',
    y2='Value:Q'
).transform_filter(
    "datum.Value >= 300"
).transform_calculate(
    "baseline", "300"
)

# Horizontal rule at the threshold, plus a text label next to it.
rule = alt.Chart(data2).mark_rule().encode(
    y='ThresholdValue:Q'
)

text = alt.Chart(data2).mark_text(
    align='left', dx=215, dy=-5
).encode(
    alt.Y('ThresholdValue:Q', axis=alt.Axis(title='PM2.5 Value')),
    text=alt.value('hazardous')
)

# Layer the charts; the red overlay and rule sit on top of the base bars.
bar1 + text + bar2 + rule
| bsd-3-clause |
scauglog/brain_record_toolbox | script_test_classifier.py | 1 | 1207 | import brain_state_calculate as bsc
import cpp_file_tools as cft
from matplotlib import pyplot as plt
import numpy as np
import Tkinter
import tkFileDialog
initdir = "C:\\"

my_bsc = bsc.brain_state_calculate(32)
my_cft = cft.cpp_file_tools(32, 1, show=True)
# Train the networks from user-selected files, persist them, then reload.
my_bsc.init_networks_on_files(initdir, my_cft, train_mod_chan=False)
my_bsc.save_networks_on_file(initdir, "0606")
my_bsc.load_networks_file(initdir)

print("select the file to test")
# Hidden Tk root so only the file dialog is shown.
root = Tkinter.Tk()
root.withdraw()
file_path = tkFileDialog.askopenfilename(multiple=True, initialdir=initdir, title="select cpp file to train the classifier", filetypes=[('all files', '.*'), ('text files', '.txt')])
print("test the file")
if not file_path == "":
    files = root.tk.splitlist(file_path)
    for f in files:
        print(f)
        # Test the classifier on each file, plot the result and the raw
        # observations, then run one round of unsupervised training on it.
        l_res, l_obs = my_cft.read_cpp_files([f], use_classifier_result=False, cut_after_cue=True, init_in_walk=True)
        success, l_of_res = my_bsc.test(l_obs, l_res)
        my_cft.plot_result(l_of_res)
        plt.figure()
        plt.imshow(np.array(l_obs).T, interpolation='none')
        my_bsc.train_unsupervised_one_file(f, my_cft, is_healthy=False)
    plt.show()

print('#############')
print('#### END ####')
Winand/pandas | pandas/core/reshape/util.py | 20 | 1915 | import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
    """Return, for each needle, its position in *haystack* (-1 if absent)."""
    return Index(haystack).get_indexer(Index(needles))
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes

    Returns
    -------
    product : list of ndarrays

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    See also
    --------
    itertools.product : Cartesian product of input iterables.  Equivalent to
        nested for-loops.
    pandas.compat.product : An alias for itertools.product.
    """
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)

    if len(X) == 0:
        return []

    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
    # np.cumproduct / np.product were deprecated aliases removed in
    # NumPy 2.0; np.cumprod / np.prod are the long-standing canonical names
    # with identical behavior.
    cumprodX = np.cumprod(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    if cumprodX[-1] != 0:
        b = cumprodX[-1] / cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)

    return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
                    np.prod(a[i]))
            for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/naive_bayes.py | 26 | 30641 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_classes, n_samples].

        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        # The per-sample normalizer log P(x) is constant over classes, so
        # the argmax of the unnormalized posterior suffices.
        return self.classes_[np.argmax(jll, axis=1)]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n); logsumexp keeps this
        # numerically stable for large-magnitude log-likelihoods.
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        # Exponentiate the normalized log-probabilities.
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
    def __init__(self, priors=None):
        # Optional fixed class priors; when given they are used as-is
        # instead of being estimated from the data (see class docstring).
        self.priors = priors

    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17
               Gaussian Naive Bayes supports fitting with *sample_weight*.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # Delegate to the shared online-update implementation; _refit=True
        # discards any previously fitted state so fit() starts from scratch.
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)
    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.

        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).

        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.

        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.

        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.

        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.

        X : array-like, shape (n_samples, number of Gaussians)
            New data points to fold into the running statistics.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.

        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        if X.shape[0] == 0:
            # Nothing new to fold in; statistics are unchanged.
            return mu, var

        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)

        if n_past == 0:
            # No prior statistics; the batch statistics are the totals.
            return new_mu, new_var

        n_total = float(n_past + n_new)

        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total

        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total

        return total_mu, total_var
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,), optional (default=None)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).
            .. versionadded:: 0.17
        Returns
        -------
        self : object
            Returns self.
        """
        # Delegate to the shared implementation; _refit=False keeps the
        # statistics accumulated by any previous partial_fit calls.
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provide prior match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if priors.sum() != 1.0:
raise ValueError('The sum of the priors should be 1.')
# Check that the prior are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
# Update if only no priors is provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data
    Any estimator based on this class should provide:
    __init__
    _joint_log_likelihood(X) as per BaseNB
    """
    def _update_class_log_prior(self, class_prior=None):
        # Recompute ``class_log_prior_`` from, in order of precedence: an
        # explicit prior, the empirical class counts, or a uniform prior.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_) -
                                     np.log(self.class_count_.sum()))
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        classes : array-like, shape = [n_classes], optional (default=None)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape = [n_samples], optional (default=None)
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))
        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # Binary case: expand the single indicator column into an
            # explicit two-column (negative, positive) matrix.
            Y = np.concatenate((1 - Y, Y), axis=1)
        n_samples, n_classes = Y.shape
        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))
        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            # Scale each row's indicator so the counts below are weighted.
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)
        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional (default=None)
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # Binary case: expand the single indicator column into an
            # explicit two-column (negative, positive) matrix.
            Y = np.concatenate((1 - Y, Y), axis=1)
        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # In the binary case drop the row for the negative class so that
        # coef_ has the shape expected of a linear model.
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)
    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """Naive Bayes classifier for multinomially distributed data.

    Suitable for classification with discrete features, e.g. word counts
    for text classification. Although the multinomial distribution formally
    requires integer feature counts, fractional counts such as tf-idf also
    work in practice.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size (n_classes,), optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.

    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.

    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.

    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.

    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.

    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """
    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate (weighted) class and feature event counts from X, Y."""
        data = X.data if issparse(X) else X
        if np.any(data < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Recompute smoothed per-class feature log probabilities."""
        counts = self.feature_count_ + self.alpha
        totals = counts.sum(axis=1)
        # Normalize each class's smoothed counts in log space.
        self.feature_log_prob_ = np.log(counts) - np.log(totals.reshape(-1, 1))

    def _joint_log_likelihood(self, X):
        """Return the unnormalized posterior log probability of samples X."""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        log_likelihood = safe_sparse_dot(X, self.feature_log_prob_.T)
        return log_likelihood + self.class_log_prior_
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.
    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.
    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional (default=0.0)
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,], optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]
    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """
    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        if self.binarize is not None:
            # Map features to booleans before counting occurrences.
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Apply smoothing to raw counts and recompute log probabilities"""
        smoothed_fc = self.feature_count_ + self.alpha
        # Each feature is a Bernoulli event, so the denominator is smoothed
        # by alpha for each of the two outcomes (present/absent).
        smoothed_cc = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_fc) -
                                  np.log(smoothed_cc.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))
        # log(1 - P(x_i | y)): log probability of each feature being absent.
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        return jll
| mit |
jpallister/stm32f4-energy-monitor | pyenergy/src/pyenergy/interactive_graph.py | 1 | 16162 | import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# Matplotlib version change leads to name change
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
import collections
import numpy as np
import scipy
import scipy.stats
import pyenergy
class Graph(QMainWindow):
    """Qt main window that live-plots current, voltage and power readings
    from a pyenergy EnergyMonitor, with per-measurement-point controls."""
    def __init__(self, em, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('Energy monitoring graph')
        self.em = em  # pyenergy.EnergyMonitor instance to poll
        self.tinterval = 0.01  # polling interval in seconds
        self.ctime = 0  # cumulative elapsed time counter
        self.wsize = 500  # number of samples kept per trace window
        self.toffset = 0  # time offset applied when the reference point changes
        self.tref = None  # measurement point used as the time reference
        self.create_main()
        self.setUpdate()
        self.setup_graphs()
        self.on_draw()
    def single_graph(self):
        """Lay out one shared plot with parasite axes for voltage and power."""
        self.fig.clear()
        self.fig.subplots_adjust(right=0.8,left=0.1)
        self.axes = SubplotHost(self.fig, 111)
        self.fig.add_axes(self.axes)
        self.vaxes = self.axes.twinx()
        self.paxes = self.axes.twinx()
        self.paxes.axis["right"].set_visible(False)
        # Push the power axis outwards so it does not overlap the voltage axis.
        offset = (70, 0)
        new_axisline = self.paxes.get_grid_helper().new_fixed_axis
        self.paxes.axis["side"] = new_axisline(loc="right", axes=self.paxes, offset=offset)
        self.paxes.axis["side"].label.set_visible(True)
        self.paxes.axis["side"].set_label("Power (W)")
        self.axes.set_xlabel("Time (s)")
        self.axes.set_ylabel("Current (A)")
        self.vaxes.set_ylabel("Voltage (V)")
        self.paxes.set_ylabel("Power (W)")
    def horiz_graph(self):
        """Lay out current, voltage and power as three side-by-side plots."""
        self.fig.clear()
        self.fig.subplots_adjust(right=0.97,left=0.1, wspace=0.33)
        self.axes = self.fig.add_subplot(131)
        self.vaxes = self.fig.add_subplot(132)
        self.paxes = self.fig.add_subplot(133)
        self.axes.set_xlabel("Time (s)")
        self.vaxes.set_xlabel("Time (s)")
        self.paxes.set_xlabel("Time (s)")
        self.axes.set_ylabel("Current (A)")
        self.vaxes.set_ylabel("Voltage (V)")
        self.paxes.set_ylabel("Power (W)")
    def vert_graph(self):
        """Lay out current, voltage and power as three vertically stacked plots."""
        self.fig.clear()
        self.fig.subplots_adjust(right=0.97,left=0.1, hspace=0.25)
        self.axes = self.fig.add_subplot(311)
        self.vaxes = self.fig.add_subplot(312)
        self.paxes = self.fig.add_subplot(313)
        # Only the bottom plot keeps its x tick labels.
        self.axes.xaxis.set_ticklabels([])
        self.vaxes.xaxis.set_ticklabels([])
        # self.axes.set_xlabel("Time (s)")
        # self.vaxes.set_xlabel("Time (s)")
        self.paxes.set_xlabel("Time (s)")
        self.axes.set_ylabel("Current (A)")
        self.vaxes.set_ylabel("Voltage (V)")
        self.paxes.set_ylabel("Power (W)")
    def create_main(self):
        """Build the central widget: canvas, sliders and per-point controls."""
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        #
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.single_graph()
        hbox = QHBoxLayout()
        hbox.setAlignment(Qt.AlignCenter)
        # Create general settings
        grid = QGridLayout()
        grid.setSpacing(5)
        grid.setAlignment(Qt.AlignCenter)
        # Add time slider
        grid.addWidget(QLabel("Interval (s)"), 1, 0)
        self.timeslider = QSlider(1)
        self.timeslider.setMinimum(1)
        self.timeslider.setMaximum(2000)
        self.timeslider.setValue(self.tinterval*1000)
        self.timeslider.setTickInterval(1)
        self.timeslider.setSingleStep(1)
        self.connect(self.timeslider, SIGNAL('valueChanged(int)'), self.updatesliders)
        grid.addWidget(self.timeslider, 2, 0)
        # Add window slider
        grid.addWidget(QLabel("Window size"), 3, 0)
        self.windowslider = QSlider(1)
        self.windowslider.setMinimum(20)
        self.windowslider.setMaximum(2000)
        self.windowslider.setValue(self.wsize)
        self.windowslider.setTickInterval(1)
        self.windowslider.setSingleStep(1)
        self.connect(self.windowslider, SIGNAL('valueChanged(int)'), self.updatesliders)
        grid.addWidget(self.windowslider, 4, 0)
        # Add graph selector
        grid.addWidget(QLabel("Graph stack"), 5, 0)
        self.graphselect = QComboBox()
        self.graphselect.addItems(["Combined", "Horizontal", "Vertical"])
        self.graphselect.setCurrentIndex(0)
        self.connect(self.graphselect, SIGNAL('currentIndexChanged(int)'), self.changegraph)
        grid.addWidget(self.graphselect, 6,0)
        box = QGroupBox("General")
        box.setLayout(grid)
        box.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        hbox.addWidget(box)
        # Per-measurement-point controls: resistor value, which quantities
        # to plot, and a text label for the traces.
        self.controls = collections.defaultdict(list)
        for mp in ["1", "2", "3", "Self"]:
            grid = QGridLayout()
            grid.setSpacing(5)
            grid.setAlignment(Qt.AlignCenter)
            combo = QComboBox()
            combo.addItems(["0.05", "0.5", "1", "5"])
            if mp == "Self":
                # The self-measurement point's resistor is fixed at 0.5.
                combo.setCurrentIndex(1)
                combo.setEnabled(False)
            else:
                combo.setCurrentIndex(2)
            self.controls[mp].append(combo)
            grid.addWidget(QLabel("Resistor"), 1, 0)
            grid.addWidget(combo, 1, 1, alignment=Qt.AlignCenter)
            cb = QCheckBox()
            self.controls[mp].append(cb)
            grid.addWidget(QLabel("Plot Current"), 2, 0)
            grid.addWidget(cb, 2, 1, alignment=Qt.AlignCenter)
            cb = QCheckBox()
            self.controls[mp].append(cb)
            grid.addWidget(QLabel("Plot Voltage"), 3, 0)
            grid.addWidget(cb, 3, 1, alignment=Qt.AlignCenter)
            cb = QCheckBox()
            self.controls[mp].append(cb)
            grid.addWidget(QLabel("Plot Power"), 4, 0)
            grid.addWidget(cb, 4, 1, alignment=Qt.AlignCenter)
            l = QLineEdit()
            l.setMinimumWidth(10)
            l.setMaximumWidth(50)
            self.controls[mp].append(l)
            grid.addWidget(QLabel("Label"), 5, 0)
            grid.addWidget(l, 5, 1, alignment=Qt.AlignCenter)
            box = QGroupBox(mp)
            box.setLayout(grid)
            box.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
            hbox.addWidget(box)
        vbox = QVBoxLayout()
        vbox.addWidget(self.canvas)
        # vbox.addWidget(self.mpl_toolbar)
        vbox.addLayout(hbox)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
    def updatesliders(self):
        """Read the slider widgets and restart the poll timer."""
        self.tinterval = self.timeslider.value() / 1000.
        self.wsize = self.windowslider.value()
        print self.tinterval, self.wsize
        self.setUpdate()
        print self.tinterval
    def changegraph(self):
        """Switch the figure layout to the selected stacking mode."""
        ind = self.graphselect.currentIndex()
        self.plots = []
        if ind == 0:
            self.single_graph()
        elif ind == 1:
            self.horiz_graph()
        else:
            self.vert_graph()
    def on_draw(self):
        """Redraw all enabled traces and rescale the axes to the window."""
        mintimes = []
        maxtimes = []
        maxps = [0]
        minps = [10]
        maxis = [0]
        minis = [10]
        maxvs = [0]
        minvs = [10]
        # Remove the previous frame's artists before replotting.
        for p in self.plots:
            p.remove()
        self.plots = []
        n_points = [0,0,0]
        for s in self.state.values():
            n_points[0] += s[1]
            n_points[1] += s[2]
            n_points[2] += s[3]
        linestyles = [["-","-","-","-"], ["-","-","-","-"], ["-","-","-","-"]]
        # for i in range(3):
        #     if n_points[i] > 1:
        #         linestyles[i] = ["-*", "-+", "-s", "-"]
        for i,(mp, vals) in enumerate(sorted(self.data.items())):
            # Calculate the number of samples in the window
            n = int(len(filter(lambda x: x >= vals["xdata"][-1] - self.tinterval*self.wsize, vals['xdata']))*1.1)
            if self.state[mp][1]:
                p1, = self.axes.plot(vals["xdata"], vals["idata"], linestyles[0][i], color='g')
                self.plots.append(p1)
                maxis.append(max(vals["idata"][-n:]))
                minis.append(min(vals["idata"][-n:]))
            if self.state[mp][2]:
                p1, = self.vaxes.plot(vals["xdata"], vals["vdata"], linestyles[1][i], color='b')
                self.plots.append(p1)
                maxvs.append(max(vals["vdata"][-n:]))
                minvs.append(min(vals["vdata"][-n:]))
            if self.state[mp][3]:
                p1, = self.paxes.plot(vals["xdata"], vals["pdata"], linestyles[2][i], color='r')
                self.plots.append(p1)
                maxps.append(max(vals["pdata"][-n:]))
                minps.append(min(vals["pdata"][-n:]))
            if self.state[mp][1] or self.state[mp][2] or self.state[mp][3]:
                maxtimes.append(max(vals["xdata"]))
                mintimes.append(min(vals["xdata"]))
        mmtimes = self.tinterval * self.wsize
        if len(maxtimes) != 0 and max(maxtimes) > mmtimes:
            mmtimes = max(maxtimes)
        self.axes.set_xlim([ mmtimes - self.tinterval*self.wsize, mmtimes])
        self.vaxes.set_xlim([ mmtimes - self.tinterval*self.wsize, mmtimes])
        self.paxes.set_xlim([ mmtimes - self.tinterval*self.wsize, mmtimes])
        # Pad each y range by 10% of its span.
        toff = self.tinterval*self.wsize * 0.1
        poff = (max(maxps) - min(minps)) *0.1
        self.paxes.set_ylim([min(minps)-poff, max(maxps)+poff])
        ioff = (max(maxis) - min(minis)) *0.1
        self.axes.set_ylim([min(minis) - ioff, max(maxis) + ioff])
        voff = (max(maxvs) - min(minvs)) *0.1
        self.vaxes.set_ylim([min(minvs)-voff, max(maxvs)+voff])
        # Label each enabled trace near its most recent mean value.
        bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.8)
        for i,(mp, vals) in enumerate(sorted(self.data.items())):
            if self.state[mp][1]:
                l = self.axes.text(vals["xdata"][-1] - toff/2, np.mean(vals["idata"][-self.wsize/10:]), self.state[mp][4], ha="right", bbox=bbox_props)
                self.plots.append(l)
            if self.state[mp][2]:
                l = self.vaxes.text(vals["xdata"][-1] - toff/2, np.mean(vals["vdata"][-self.wsize/10:]), self.state[mp][4], ha="right", bbox=bbox_props)
                self.plots.append(l)
            if self.state[mp][3]:
                l = self.paxes.text(vals["xdata"][-1] - toff/2, np.mean(vals["pdata"][-self.wsize/10:]), self.state[mp][4], ha="right", bbox=bbox_props)
                self.plots.append(l)
        self.canvas.draw()
    def setup_graphs(self):
        """Reset per-point sample buffers and the plotting state."""
        self.data = {
            "1": {"xdata": [],
                  "idata": [],
                  "pdata": [],
                  "vdata": []},
            "2": {"xdata": [],
                  "idata": [],
                  "pdata": [],
                  "vdata": []},
            "3": {"xdata": [],
                  "idata": [],
                  "pdata": [],
                  "vdata": []},
            "Self": {"xdata": [],
                     "idata": [],
                     "pdata": [],
                     "vdata": []},
            }
        # state[mp] = [resistor index, plot current?, plot voltage?,
        #              plot power?, trace label]
        self.state = {
            "1": [2, False, False, False, "1"],
            "2": [2, False, False, False, "2"],
            "3": [2, False, False, False, "3"],
            "Self": [2, False, False, False, "Self"],
            }
        self.plots = []
    def setUpdate(self):
        """(Re)start the periodic timer that drives update()."""
        self.timer = QTimer()
        self.timer.setSingleShot(False)
        self.timer.timeout.connect(self.update)
        self.timer.start(self.tinterval * 1000)
    def getState(self):
        """Snapshot the control widgets into the state-list format used by
        self.state (see setup_graphs)."""
        state = collections.defaultdict(list)
        for mp, vals in self.controls.items():
            for i, control in enumerate(vals):
                if i == 0:
                    state[mp].append(control.currentIndex())
                elif i == 4:
                    state[mp].append(control.text())
                else:
                    state[mp].append(control.checkState() == 2)
        return state
    def update(self):
        """Poll the monitor, append new samples and trigger a redraw."""
        state = self.getState()
        self.ctime += self.tinterval
        disabled_tref = None
        measurements = {}
        first_on = None
        for i, mp in enumerate(sorted(state.keys())):
            self.em.measurement_params[i+1]['resistor'] = [0.05, 0.5, 1.0, 5.0][state[mp][0]]
            stateChange = False
            # Check we need to enable or disable a measurement point
            if bool(sum(state[mp][1:4])) != bool(sum(self.state[mp][1:4])):
                stateChange = True
                if bool(sum(state[mp][1:4])):
                    self.em.enableMeasurementPoint(i+1)
                    self.em.start(i+1)
                    self.data[mp]['xdata'] = []
                    self.data[mp]['idata'] = []
                    self.data[mp]['vdata'] = []
                    self.data[mp]['pdata'] = []
                else:
                    self.em.stop(i+1)
                    self.em.disableMeasurementPoint(i+1)
                    if mp == self.tref:
                        disabled_tref = mp
            if bool(sum(state[mp][1:4])) or stateChange:
                m = self.em.getInstantaneous(i+1)
                measurements[mp] = m
                if first_on is None and mp != disabled_tref:
                    first_on = mp
        if self.tref is None or first_on is None:
            self.tref = first_on
        if disabled_tref is not None and first_on is not None:
            # The reference point was just disabled: re-base the time axis
            # on the first still-enabled point.
            self.toffset += (measurements[self.tref][4] - measurements[first_on][4]) * 2. / 168000000*2.
            self.tref = first_on
        if self.tref is not None:
            base_t = measurements[self.tref][4]* 2. / 168000000*2. + self.toffset
        else:
            base_t = 0
        problem = False
        for mp, m in measurements.items():
            i = {"1": 1, "2":2, "3":3, "Self":4}[mp]
            res = self.em.measurement_params[i]['resistor']
            vref = self.em.measurement_params[i]['vref']
            gain = self.em.measurement_params[i]['gain']
            # Convert raw readings to volts/amps/watts (the /4096 suggests
            # 12-bit ADC counts -- TODO confirm against pyenergy).
            v = float(vref) / 4096. * m[2] * 2
            c = float(vref) / gain / res / 4096. * m[3]
            p = v * c
            t = base_t
            if self.data[mp]['xdata'] and t < self.data[mp]['xdata'][-1]:
                problem=True
            self.data[mp]['xdata'].append(t)
            self.data[mp]['idata'].append(c)
            self.data[mp]['vdata'].append(v)
            self.data[mp]['pdata'].append(p)
            self.data[mp]['xdata'] = self.data[mp]['xdata'][-self.wsize:]
            self.data[mp]['idata'] = self.data[mp]['idata'][-self.wsize:]
            self.data[mp]['vdata'] = self.data[mp]['vdata'][-self.wsize:]
            self.data[mp]['pdata'] = self.data[mp]['pdata'][-self.wsize:]
        if problem:
            # Time went backwards (reference change glitch): drop all
            # buffered samples and start the traces afresh.
            self.data = {
                "1": {"xdata": [],
                      "idata": [],
                      "pdata": [],
                      "vdata": []},
                "2": {"xdata": [],
                      "idata": [],
                      "pdata": [],
                      "vdata": []},
                "3": {"xdata": [],
                      "idata": [],
                      "pdata": [],
                      "vdata": []},
                "Self": {"xdata": [],
                         "idata": [],
                         "pdata": [],
                         "vdata": []},
                }
        self.state = state
        self.on_draw()
def main(serial):
    """Connect to the energy monitor with the given *serial* identifier and
    run the Qt event loop until the graph window is closed."""
    app = QApplication(sys.argv)
    app.setStyle("plastique")
    em = pyenergy.EnergyMonitor(serial)
    em.connect()
    form = Graph(em)
    form.show()
    app.exec_()
if __name__ == "__main__":
    # Default to the monitor board with serial identifier "EE00".
    main("EE00")
| gpl-3.0 |
csieg/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
# Module-level 3D scene shared by all the registration/draw helpers below.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)

ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# NOTE(review): z and x axes are inverted -- presumably to match the
# vehicle body-frame convention used by icosahedron.py; confirm there.
ax.invert_zaxis()
ax.invert_xaxis()

ax.set_aspect('equal')

# Polygons and sub-triangle section indexes queued for the next show() call.
added_polygons = set()
added_sections = set()
def polygons(polygons):
    """Register every polygon in *polygons* for drawing by show()."""
    for item in polygons:
        polygon(item)
def polygon(polygon):
    """Register a single polygon for drawing by show()."""
    added_polygons.add(polygon)
def section(s):
    """Register a single sub-triangle section index for drawing by show()."""
    added_sections.add(s)
def sections(sections):
    """Register every section index in *sections* for drawing by show()."""
    for idx in sections:
        section(idx)
def show(subtriangles=False):
    """Draw all registered polygons/sections in the module's 3D axes.

    If *subtriangles* is true (or any sections were registered), each
    icosahedron triangle is rendered as its four colored sub-triangles
    and a color legend is added.
    """
    polygons = []
    facecolors = []
    triangles_indexes = set()

    # Fixed color per sub-triangle position (0..3) within a triangle.
    subtriangle_facecolors = (
        '#CCCCCC',
        '#CCE5FF',
        '#E5FFCC',
        '#FFCCCC',
    )

    # Registering any section implies sub-triangle rendering.
    if added_sections:
        subtriangles = True

    for p in added_polygons:
        try:
            i = ico.triangles.index(p)
        except ValueError:
            # Not one of the icosahedron triangles: draw it as-is, uncolored.
            polygons.append(p)
            continue

        if subtriangles:
            # Replace the whole triangle by its four section sub-triangles.
            sections(range(i * 4, i * 4 + 4))
        else:
            triangles_indexes.add(i)
            polygons.append(p)
            facecolors.append('#DDDDDD')

    for s in added_sections:
        # Sections are numbered 4*triangle + position.
        triangles_indexes.add(int(s / 4))
        subtriangle_index = s % 4
        polygons.append(grid.section_triangle(s))
        facecolors.append(subtriangle_facecolors[subtriangle_index])

    ax.add_collection3d(Poly3DCollection(
        polygons,
        facecolors=facecolors,
        edgecolors="#777777",
    ))

    # Label each triangle with its index, placed slightly beyond the
    # triangle centroid (sum/2.6 instead of sum/3) so the text sits
    # outside the surface.
    for i in triangles_indexes:
        t = ico.triangles[i]
        mx = my = mz = 0
        for x, y, z in t:
            mx += x
            my += y
            mz += z
        ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')

    if subtriangles:
        ax.legend(
            handles=tuple(
                mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
                for i, c in enumerate(subtriangle_facecolors)
            ),
        )

    plt.show()
| gpl-3.0 |
qilicun/python | python3/matplotlib/tex_demo.py | 5 | 1061 | """
Demo of TeX rendering.
You can use TeX to render all of your matplotlib text if the rc
parameter text.usetex is set. This works currently on the agg and ps
backends, and requires that you have tex and the other dependencies
described at http://matplotlib.sf.net/matplotlib.texmanager.html
properly installed on your system. The first time you run a script
you will see a lot of output from tex and associated tools. The next
time, the run may be silent, as a lot of the information is cached in
~/.tex.cache
"""
import numpy as np
import matplotlib.pyplot as plt
# Example data: a shifted cosine over [0, 1].
t = np.arange(0.0, 1.0 + 0.01, 0.01)
s = np.cos(4 * np.pi * t) + 2

# Render all text through LaTeX (requires a working TeX installation,
# see the module docstring above).
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

plt.plot(t, s)

plt.xlabel(r'\textbf{time} (s)')
plt.ylabel(r'\textit{voltage} (mV)',fontsize=16)
plt.title(r"\TeX\ is Number "
          r"$\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!",
          fontsize=16, color='gray')
# Make room for the ridiculously large title.
plt.subplots_adjust(top=0.8)

plt.savefig('tex_demo')
plt.show()
| gpl-3.0 |
fivejjs/pybasicbayes | pybasicbayes/distributions/gaussian.py | 2 | 51874 | from __future__ import division
__all__ = \
['Gaussian', 'GaussianFixedMean', 'GaussianFixedCov', 'GaussianFixed',
'GaussianNonConj', 'DiagonalGaussian', 'DiagonalGaussianNonconjNIG',
'IsotropicGaussian', 'ScalarGaussianNIX', 'ScalarGaussianNonconjNIX',
'ScalarGaussianNonconjNIG', 'ScalarGaussianFixedvar']
import numpy as np
from numpy import newaxis as na
from numpy.core.umath_tests import inner1d
import scipy.linalg
import scipy.stats as stats
import scipy.special as special
import copy
from pybasicbayes.abstractions import GibbsSampling, MeanField, \
MeanFieldSVI, Collapsed, MaxLikelihood, MAP, Tempering
from pybasicbayes.distributions.meta import _FixedParamsMixin
from pybasicbayes.util.stats import sample_niw, invwishart_entropy, \
sample_invwishart, invwishart_log_partitionfunction, \
getdatasize, flattendata, getdatadimension, \
combinedata, multivariate_t_loglik, gi
weps = 1e-12
class _GaussianBase(object):
    """Shared machinery for the multivariate Gaussian classes in this module.

    Subclasses provide a mean vector ``mu`` and covariance matrix ``sigma``;
    this base supplies sampling, log-likelihood, plotting, and a cached
    Cholesky factor of ``sigma``.
    """

    @property
    def params(self):
        # Current point estimates of the distribution parameters.
        return dict(mu=self.mu, sigma=self.sigma)

    @property
    def D(self):
        # Dimensionality, inferred from the mean vector.
        return self.mu.shape[0]

    ### internals

    def getsigma(self):
        return self._sigma

    def setsigma(self,sigma):
        # Invalidate the cached Cholesky factor whenever sigma changes.
        self._sigma = sigma
        self._sigma_chol = None

    sigma = property(getsigma,setsigma)

    @property
    def sigma_chol(self):
        # Lazily computed and cached lower-triangular Cholesky factor.
        if not hasattr(self,'_sigma_chol') or self._sigma_chol is None:
            self._sigma_chol = np.linalg.cholesky(self.sigma)
        return self._sigma_chol

    ### distribution stuff

    def rvs(self,size=None):
        # Draw samples as mu + z L^T with z ~ N(0, I), L the Cholesky factor.
        size = 1 if size is None else size
        size = size + (self.mu.shape[0],) if isinstance(size,tuple) \
            else (size,self.mu.shape[0])
        return self.mu + np.random.normal(size=size).dot(self.sigma_chol.T)

    def log_likelihood(self,x):
        # Row-wise Gaussian log-density. Rows containing NaNs are treated
        # as missing data and assigned log-likelihood 0.
        try:
            mu, D = self.mu, self.D
            sigma_chol = self.sigma_chol
            bads = np.isnan(np.atleast_2d(x)).any(axis=1)
            x = np.nan_to_num(x).reshape((-1,D)) - mu
            xs = scipy.linalg.solve_triangular(sigma_chol,x.T,lower=True)
            out = -1./2. * inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi) \
                - np.log(sigma_chol.diagonal()).sum()
            out[bads] = 0
            return out
        except np.linalg.LinAlgError:
            # NOTE: degenerate distribution doesn't have a density
            return np.repeat(-np.inf,x.shape[0])

    ### plotting

    # TODO making animations, this seems to generate an extra notebook figure

    # Cached matplotlib artists so plot(update=True) can redraw in place.
    _scatterplot = None
    _parameterplot = None

    def plot(self,ax=None,data=None,indices=None,color='b',
            plot_params=True,label='',alpha=1.,
            update=False,draw=True):
        """Scatter *data* and/or draw the Gaussian as an ellipse.

        For D > 2 both the data and the parameters are projected onto a
        fixed random 2D basis (seeded, so repeated calls use the same one).
        """
        import matplotlib.pyplot as plt
        from pybasicbayes.util.plot import project_data, \
            plot_gaussian_projection, plot_gaussian_2D

        ax = ax if ax else plt.gca()
        D = self.D
        if data is not None:
            data = flattendata(data)

        if data is not None:
            if D > 2:
                plot_basis = np.random.RandomState(seed=0).randn(2,D)
                data = project_data(data,plot_basis)
            if update and self._scatterplot is not None:
                self._scatterplot.set_offsets(data)
                self._scatterplot.set_color(color)
            else:
                self._scatterplot = ax.scatter(
                    data[:,0],data[:,1],marker='.',color=color)

        if plot_params:
            if D > 2:
                plot_basis = np.random.RandomState(seed=0).randn(2,D)
                self._parameterplot = \
                    plot_gaussian_projection(
                        self.mu,self.sigma,plot_basis,
                        color=color,label=label,alpha=min(1-1e-3,alpha),
                        ax=ax, artists=self._parameterplot if update else None)
            else:
                self._parameterplot = \
                    plot_gaussian_2D(
                        self.mu,self.sigma,color=color,label=label,
                        alpha=min(1-1e-3,alpha), ax=ax,
                        artists=self._parameterplot if update else None)

        if draw:
            plt.draw()

        return [self._scatterplot] + list(self._parameterplot)

    def to_json_dict(self):
        # Serialize a 2D Gaussian as an ellipse (center, radii, angle).
        D = self.mu.shape[0]
        assert D == 2
        U,s,_ = np.linalg.svd(self.sigma)
        U /= np.linalg.det(U)
        theta = np.arctan2(U[0,0],U[0,1])*180/np.pi
        return {'x':self.mu[0],'y':self.mu[1],'rx':np.sqrt(s[0]),
                'ry':np.sqrt(s[1]), 'theta':theta}
class Gaussian(
        _GaussianBase, GibbsSampling, MeanField, MeanFieldSVI,
        Collapsed, MAP, MaxLikelihood):
    '''
    Multivariate Gaussian distribution class.

    NOTE: Only works for 2 or more dimensions. For a scalar Gaussian, use a
    scalar class.  Uses a conjugate Normal/Inverse-Wishart prior.

    Hyperparameters mostly follow Gelman et al.'s notation in Bayesian Data
    Analysis:
        nu_0, sigma_0, mu_0, kappa_0

    Parameters are mean and covariance matrix:
        mu, sigma
    '''

    def __init__(
            self, mu=None, sigma=None,
            mu_0=None, sigma_0=None, kappa_0=None, nu_0=None):
        self.mu = mu
        self.sigma = sigma

        # Mean-field (variational) parameters start out equal to the prior.
        self.mu_0 = self.mu_mf = mu_0
        self.sigma_0 = self.sigma_mf = sigma_0
        self.kappa_0 = self.kappa_mf = kappa_0
        self.nu_0 = self.nu_mf = nu_0

        # NOTE: resampling will set mu_mf and sigma_mf if necessary
        if mu is sigma is None \
                and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
            self.resample()  # initialize from prior
        if mu is not None and sigma is not None \
                and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
            self.mu_mf = mu
            self.sigma_mf = sigma * (self.nu_0 - self.mu_mf.shape[0] - 1)

    @property
    def hypparams(self):
        return dict(
            mu_0=self.mu_0,sigma_0=self.sigma_0,
            kappa_0=self.kappa_0,nu_0=self.nu_0)

    @property
    def natural_hypparam(self):
        return self._standard_to_natural(
            self.mu_0,self.sigma_0,self.kappa_0,self.nu_0)

    @natural_hypparam.setter
    def natural_hypparam(self,natparam):
        self.mu_0, self.sigma_0, self.kappa_0, self.nu_0 = \
            self._natural_to_standard(natparam)

    def _standard_to_natural(self,mu_mf,sigma_mf,kappa_mf,nu_mf):
        # Pack the NIW hyperparameters into one symmetric (D+2)x(D+2) matrix
        # so that posterior updates reduce to adding sufficient statistics.
        D = sigma_mf.shape[0]
        out = np.zeros((D+2,D+2))
        out[:D,:D] = sigma_mf + kappa_mf * np.outer(mu_mf,mu_mf)
        out[:D,-2] = out[-2,:D] = kappa_mf * mu_mf
        out[-2,-2] = kappa_mf
        out[-1,-1] = nu_mf + 2 + D
        return out

    def _natural_to_standard(self,natparam):
        # Inverse of _standard_to_natural.
        D = natparam.shape[0]-2
        A = natparam[:D,:D]
        b = natparam[:D,-2]
        c = natparam[-2,-2]
        d = natparam[-1,-1]
        return b/c, A - np.outer(b,b)/c, c, d - 2 - D

    @property
    def num_parameters(self):
        # Free parameters of the symmetric DxD covariance.
        D = self.D
        return D*(D+1)/2

    @property
    def D(self):
        if self.mu is not None:
            return self.mu.shape[0]
        elif self.mu_0 is not None:
            return self.mu_0.shape[0]

    def _get_statistics(self,data,D=None):
        # Sufficient statistics in the same packed-matrix layout as the
        # natural hyperparameters; data may be an array or a list of arrays.
        if D is None:
            D = self.D if self.D is not None else getdatadimension(data)
        out = np.zeros((D+2,D+2))
        if isinstance(data,np.ndarray):
            out[:D,:D] = data.T.dot(data)
            out[-2,:D] = out[:D,-2] = data.sum(0)
            out[-2,-2] = out[-1,-1] = data.shape[0]
            return out
        else:
            return sum(map(self._get_statistics,data),out)

    def _get_weighted_statistics(self,data,weights,D=None):
        # Same as _get_statistics but with per-row weights (responsibilities).
        D = getdatadimension(data) if D is None else D
        out = np.zeros((D+2,D+2))
        if isinstance(data,np.ndarray):
            out[:D,:D] = data.T.dot(weights[:,na]*data)
            out[-2,:D] = out[:D,-2] = weights.dot(data)
            out[-2,-2] = out[-1,-1] = weights.sum()
            return out
        else:
            return sum(map(self._get_weighted_statistics,data,weights),out)

    def _get_empty_statistics(self, D):
        out = np.zeros((D+2,D+2))
        return out

    def empirical_bayes(self,data):
        # Set the prior directly from the data's sufficient statistics.
        self.natural_hypparam = self._get_statistics(data)
        self.resample()  # intialize from prior given new hyperparameters
        return self

    ### Gibbs sampling

    def resample(self,data=[]):
        # Sample (mu, sigma) from the NIW posterior given data.
        D = len(self.mu_0)
        self.mu, self.sigma = \
            sample_niw(*self._natural_to_standard(
                self.natural_hypparam + self._get_statistics(data,D)))
        # NOTE: next lines let Gibbs sampling initialize mean field
        nu = self.nu_mf if hasattr(self,'nu_mf') and self.nu_mf \
            else self.nu_0
        self.mu_mf, self._sigma_mf = self.mu, self.sigma * (nu - D - 1)
        return self

    def copy_sample(self):
        # Shallow copy plus fresh arrays for the sampled parameters.
        new = copy.copy(self)
        new.mu = self.mu.copy()
        new.sigma = self.sigma.copy()
        return new

    ### Mean Field

    def _resample_from_mf(self):
        self.mu, self.sigma = \
            sample_niw(*self._natural_to_standard(
                self.mf_natural_hypparam))
        return self

    def meanfieldupdate(self,data,weights):
        D = len(self.mu_0)
        self.mf_natural_hypparam = \
            self.natural_hypparam + self._get_weighted_statistics(
                data, weights, D)

    def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
        # Stochastic variational (natural-gradient) step: convex combination
        # of the current variational parameters and a minibatch-scaled target.
        D = len(self.mu_0)
        self.mf_natural_hypparam = \
            (1-stepsize) * self.mf_natural_hypparam + stepsize * (
                self.natural_hypparam
                + 1./minibatchfrac
                * self._get_weighted_statistics(data,weights,D))

    @property
    def mf_natural_hypparam(self):
        return self._standard_to_natural(
            self.mu_mf,self.sigma_mf,self.kappa_mf,self.nu_mf)

    @mf_natural_hypparam.setter
    def mf_natural_hypparam(self,natparam):
        self.mu_mf, self.sigma_mf, self.kappa_mf, self.nu_mf = \
            self._natural_to_standard(natparam)
        # NOTE: next line is for plotting
        self.mu, self.sigma = \
            self.mu_mf, self.sigma_mf/(self.nu_mf - self.mu_mf.shape[0] - 1)

    @property
    def sigma_mf(self):
        return self._sigma_mf

    @sigma_mf.setter
    def sigma_mf(self,val):
        # Invalidate the cached Cholesky factor on assignment.
        self._sigma_mf = val
        self._sigma_mf_chol = None

    @property
    def sigma_mf_chol(self):
        if self._sigma_mf_chol is None:
            self._sigma_mf_chol = np.linalg.cholesky(self.sigma_mf)
        return self._sigma_mf_chol

    def get_vlb(self):
        # Variational lower bound contribution of this factor:
        # E_q[ln p] + H[q].
        D = len(self.mu_0)
        loglmbdatilde = self._loglmbdatilde()

        # see Eq. 10.77 in Bishop
        q_entropy = -0.5 * (loglmbdatilde + D * (np.log(self.kappa_mf/(2*np.pi))-1)) \
            + invwishart_entropy(self.sigma_mf,self.nu_mf)
        # see Eq. 10.74 in Bishop, we aren't summing over K
        p_avgengy = 0.5 * (D * np.log(self.kappa_0/(2*np.pi)) + loglmbdatilde
            - D*self.kappa_0/self.kappa_mf - self.kappa_0*self.nu_mf*
            np.dot(self.mu_mf -
                self.mu_0,np.linalg.solve(self.sigma_mf,self.mu_mf - self.mu_0))) \
            + invwishart_log_partitionfunction(self.sigma_0,self.nu_0) \
            + (self.nu_0 - D - 1)/2*loglmbdatilde - 1/2*self.nu_mf \
            * np.linalg.solve(self.sigma_mf,self.sigma_0).trace()

        return p_avgengy + q_entropy

    def expected_log_likelihood(self,x):
        mu_n, kappa_n, nu_n = self.mu_mf, self.kappa_mf, self.nu_mf
        D = len(mu_n)
        x = np.reshape(x,(-1,D)) - mu_n  # x is now centered
        xs = np.linalg.solve(self.sigma_mf_chol,x.T)

        # see Eqs. 10.64, 10.67, and 10.71 in Bishop
        return self._loglmbdatilde()/2 - D/(2*kappa_n) - nu_n/2 * \
            inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi)

    def _loglmbdatilde(self):
        # see Eq. 10.65 in Bishop
        D = len(self.mu_0)
        chol = self.sigma_mf_chol
        return special.digamma((self.nu_mf-np.arange(D))/2.).sum() \
            + D*np.log(2) - 2*np.log(chol.diagonal()).sum()

    ### Collapsed

    def log_marginal_likelihood(self,data):
        # Ratio of posterior to prior NIW log-normalizers plus the
        # Gaussian base measure.
        n, D = getdatasize(data), len(self.mu_0)
        return self._log_partition_function(
                *self._natural_to_standard(
                    self.natural_hypparam + self._get_statistics(data,D))) \
            - self._log_partition_function(self.mu_0,self.sigma_0,self.kappa_0,self.nu_0) \
            - n*D/2 * np.log(2*np.pi)

    def _log_partition_function(self,mu,sigma,kappa,nu):
        D = len(mu)
        chol = np.linalg.cholesky(sigma)
        return nu*D/2*np.log(2) + special.multigammaln(nu/2,D) + D/2*np.log(2*np.pi/kappa) \
            - nu*np.log(chol.diagonal()).sum()

    def log_predictive_studentt_datapoints(self,datapoints,olddata):
        # The NIW posterior predictive is a multivariate Student's t.
        D = len(self.mu_0)
        mu_n, sigma_n, kappa_n, nu_n = \
            self._natural_to_standard(
                self.natural_hypparam + self._get_statistics(olddata,D))
        return multivariate_t_loglik(
            datapoints,nu_n-D+1,mu_n,(kappa_n+1)/(kappa_n*(nu_n-D+1))*sigma_n)

    def log_predictive_studentt(self,newdata,olddata):
        # Chain rule: score each new point given olddata plus the
        # previously-scored new points.
        newdata = np.atleast_2d(newdata)
        return sum(self.log_predictive_studentt_datapoints(
            d,combinedata((olddata,newdata[:i])))[0] for i,d in enumerate(newdata))

    ### Max likelihood

    def max_likelihood(self,data,weights=None):
        D = getdatadimension(data)
        if weights is None:
            statmat = self._get_statistics(data,D)
        else:
            statmat = self._get_weighted_statistics(data,weights,D)

        n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]

        # this SVD is necessary to check if the max likelihood solution is
        # degenerate, which can happen in the EM algorithm
        if n < D or (np.linalg.svd(xxt,compute_uv=False) > 1e-6).sum() < D:
            # Degenerate solution: park the component far away so it
            # effectively drops out of the mixture.
            self.broken = True
            self.mu = 99999999*np.ones(D)
            self.sigma = np.eye(D)
        else:
            self.mu = x/n
            self.sigma = xxt/n - np.outer(self.mu,self.mu)

        return self

    def MAP(self,data,weights=None):
        D = getdatadimension(data)
        # max likelihood with prior pseudocounts included in data
        if weights is None:
            statmat = self._get_statistics(data)
        else:
            statmat = self._get_weighted_statistics(data,weights)
        statmat += self.natural_hypparam

        n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]

        self.mu = x/n
        self.sigma = xxt/n - np.outer(self.mu,self.mu)

        return self
class GaussianFixedMean(_GaussianBase, GibbsSampling, MaxLikelihood):
    """Multivariate Gaussian with a known mean; only the covariance is learned.

    Inverse-Wishart prior on the covariance (see sample_invwishart):
        sigma ~ InvWishart(nu_0, lmbda_0)
    """

    def __init__(self,mu=None,sigma=None,nu_0=None,lmbda_0=None):
        self.sigma = sigma
        self.mu = mu
        self.nu_0 = nu_0
        self.lmbda_0 = lmbda_0
        if sigma is None and not any(_ is None for _ in (nu_0,lmbda_0)):
            self.resample()  # initialize from prior

    @property
    def hypparams(self):
        return dict(nu_0=self.nu_0,lmbda_0=self.lmbda_0)

    @property
    def num_parameters(self):
        # Free parameters of the symmetric DxD covariance.
        D = len(self.mu)
        return D*(D+1)/2

    def _get_statistics(self,data):
        # Count and scatter matrix of (data - mu). gi() presumably selects
        # valid (non-NaN) rows -- defined in pybasicbayes.util.stats.
        n = getdatasize(data)
        if n > 1e-4:
            if isinstance(data,np.ndarray):
                centered = data[gi(data)] - self.mu
                sumsq = centered.T.dot(centered)
                n = len(centered)
            else:
                sumsq = sum((d[gi(d)]-self.mu).T.dot(d[gi(d)]-self.mu) for d in data)
        else:
            sumsq = None
        return n, sumsq

    def _get_weighted_statistics(self,data,weights):
        # Weighted variant; data may be one array or a list of arrays.
        if isinstance(data,np.ndarray):
            neff = weights.sum()
            if neff > weps:
                centered = data - self.mu
                sumsq = centered.T.dot(weights[:,na]*centered)
            else:
                sumsq = None
        else:
            neff = sum(w.sum() for w in weights)
            if neff > weps:
                sumsq = sum((d-self.mu).T.dot(w[:,na]*(d-self.mu)) for w,d in zip(weights,data))
            else:
                sumsq = None
        return neff, sumsq

    def _posterior_hypparams(self,n,sumsq):
        # Conjugate IW update: add the count to nu and the scatter to lmbda.
        nu_0, lmbda_0 = self.nu_0, self.lmbda_0
        if n > 1e-4:
            nu_0 = nu_0 + n
            sigma_n = self.lmbda_0 + sumsq
            return sigma_n, nu_0
        else:
            return lmbda_0, nu_0

    ### Gibbs sampling

    def resample(self, data=[]):
        self.sigma = sample_invwishart(*self._posterior_hypparams(
            *self._get_statistics(data)))
        return self

    ### Max likelihood

    def max_likelihood(self,data,weights=None):
        D = getdatadimension(data)
        if weights is None:
            n, sumsq = self._get_statistics(data)
        else:
            n, sumsq = self._get_weighted_statistics(data,weights)

        if n < D or (np.linalg.svd(sumsq,compute_uv=False) > 1e-6).sum() < D:
            # broken! degenerate scatter: fall back to a tiny isotropic covariance
            self.sigma = np.eye(D)*1e-9
            self.broken = True
        else:
            self.sigma = sumsq/n

        return self
class GaussianFixedCov(_GaussianBase, GibbsSampling, MaxLikelihood):
    """Multivariate Gaussian with a known covariance; only the mean is learned.

    Conjugate Gaussian prior on the mean:
        mu ~ N(mu_0, sigma_0)

    See Gelman's Bayesian Data Analysis notation around Eq. 3.18, p. 85
    in 2nd Edition. We replaced \\Lambda_0 with sigma_0 since it is a prior
    *covariance* matrix rather than a precision matrix.
    """

    def __init__(self,mu=None,sigma=None,mu_0=None,sigma_0=None):
        self.mu = mu
        self.sigma = sigma
        self.mu_0 = mu_0
        self.sigma_0 = sigma_0
        if mu is None and not any(_ is None for _ in (mu_0,sigma_0)):
            self.resample()

    @property
    def hypparams(self):
        return dict(mu_0=self.mu_0,sigma_0=self.sigma_0)

    @property
    def sigma_inv(self):
        # Cached inverse of the fixed observation covariance.
        if not hasattr(self,'_sigma_inv'):
            self._sigma_inv = np.linalg.inv(self.sigma)
        return self._sigma_inv

    @property
    def sigma_inv_0(self):
        # Cached inverse of the prior covariance.
        if not hasattr(self,'_sigma_inv_0'):
            self._sigma_inv_0 = np.linalg.inv(self.sigma_0)
        return self._sigma_inv_0

    @property
    def num_parameters(self):
        return len(self.mu)

    def _get_statistics(self,data):
        # Count and sample mean; data may be one array or a list of arrays.
        n = getdatasize(data)
        if n > 0:
            if isinstance(data,np.ndarray):
                xbar = data.mean(0)
            else:
                xbar = sum(d.sum(0) for d in data) / n
        else:
            xbar = None
        return n, xbar

    def _get_weighted_statistics(self,data,weights):
        # Weighted effective count and weighted mean.
        if isinstance(data,np.ndarray):
            neff = weights.sum()
            if neff > weps:
                xbar = weights.dot(data) / neff
            else:
                xbar = None
        else:
            neff = sum(w.sum() for w in weights)
            if neff > weps:
                xbar = sum(w.dot(d) for w,d in zip(weights,data)) / neff
            else:
                xbar = None
        return neff, xbar

    def _posterior_hypparams(self,n,xbar):
        # Precision-weighted combination of prior mean and sample mean;
        # returns the posterior mean and posterior *precision*.
        sigma_inv, mu_0, sigma_inv_0 = self.sigma_inv, self.mu_0, self.sigma_inv_0
        if n > 0:
            sigma_inv_n = n*sigma_inv + sigma_inv_0
            mu_n = np.linalg.solve(
                sigma_inv_n, sigma_inv_0.dot(mu_0) + n*sigma_inv.dot(xbar))
            return mu_n, sigma_inv_n
        else:
            return mu_0, sigma_inv_0

    ### Gibbs sampling

    def resample(self,data=[]):
        mu_n, sigma_n_inv = self._posterior_hypparams(*self._get_statistics(data))
        D = len(mu_n)
        L = np.linalg.cholesky(sigma_n_inv)
        # To sample x ~ N(0, J^{-1}) with J = L L^T, solve L^T x = z for
        # z ~ N(0, I): then cov(x) = L^{-T} L^{-1} = (L L^T)^{-1} = J^{-1}.
        # NOTE: fixed -- the previous code solved L x = z (no trans), which
        # gives cov(x) = (L^T L)^{-1}, the wrong covariance in general.
        self.mu = scipy.linalg.solve_triangular(
            L,np.random.normal(size=D),lower=True,trans='T') \
            + mu_n
        return self

    ### Max likelihood

    def max_likelihood(self,data,weights=None):
        # With fixed covariance the ML mean is just the (weighted) mean.
        if weights is None:
            n, xbar = self._get_statistics(data)
        else:
            n, xbar = self._get_weighted_statistics(data,weights)

        self.mu = xbar
        return self
class GaussianFixed(_FixedParamsMixin, Gaussian):
    """Gaussian whose mean and covariance are fixed (never resampled)."""

    def __init__(self, mu, sigma):
        # Deliberately skip Gaussian.__init__: no prior hyperparameters here.
        self.mu = mu
        self.sigma = sigma
class GaussianNonConj(_GaussianBase, GibbsSampling):
    """Gaussian with a non-conjugate factored prior p(mu) p(sigma).

    Internally composes a GaussianFixedCov (which resamples mu) and a
    GaussianFixedMean (which resamples sigma) and alternates between them.
    """

    def __init__(self,mu=None,sigma=None,
            mu_0=None,mu_lmbda_0=None,nu_0=None,sigma_lmbda_0=None):
        self._sigma_distn = GaussianFixedMean(mu=mu,
                nu_0=nu_0,lmbda_0=sigma_lmbda_0,sigma=sigma)
        self._mu_distn = GaussianFixedCov(sigma=self._sigma_distn.sigma,
                mu_0=mu_0, sigma_0=mu_lmbda_0,mu=mu)
        # Keep the two sub-distributions consistent from the start.
        self._sigma_distn.mu = self._mu_distn.mu

    @property
    def hypparams(self):
        d = self._mu_distn.hypparams
        d.update(**self._sigma_distn.hypparams)
        return d

    def _get_mu(self):
        return self._mu_distn.mu

    def _set_mu(self,val):
        # Propagate to both sub-distributions so they never disagree.
        self._mu_distn.mu = val
        self._sigma_distn.mu = val

    mu = property(_get_mu,_set_mu)

    def _get_sigma(self):
        return self._sigma_distn.sigma

    def _set_sigma(self,val):
        self._sigma_distn.sigma = val
        self._mu_distn.sigma = val

    sigma = property(_get_sigma,_set_sigma)

    ### Gibbs sampling

    def resample(self,data=[],niter=1):
        """Alternately resample mu | sigma and sigma | mu for niter sweeps."""
        # With no data the conditionals are the priors, so one sweep suffices.
        if getdatasize(data) == 0:
            niter = 1

        # TODO this is kinda dumb because it collects statistics over and over
        # instead of updating them...
        for itr in xrange(niter):
            # resample mu
            self._mu_distn.sigma = self._sigma_distn.sigma
            self._mu_distn.resample(data)

            # resample sigma
            self._sigma_distn.mu = self._mu_distn.mu
            self._sigma_distn.resample(data)

        return self
# TODO collapsed
class DiagonalGaussian(_GaussianBase,GibbsSampling,MaxLikelihood,MeanField,Tempering):
    '''
    Product of normal-inverse-gamma priors over mu (mean vector) and sigmas
    (vector of scalar variances).

    The prior follows
        sigmas     ~ InvGamma(alphas_0,betas_0) iid
        mu | sigma ~ N(mu_0,1/nus_0 * diag(sigmas))

    It allows placing different prior hyperparameters on different components.
    '''

    def __init__(self,mu=None,sigmas=None,mu_0=None,nus_0=None,alphas_0=None,betas_0=None):
        # all the s's refer to the fact that these are vectors of length
        # len(mu_0) OR scalars
        if mu_0 is not None:
            D = mu_0.shape[0]
            # Broadcast scalar hyperparameters to per-coordinate vectors.
            if nus_0 is not None and \
                    (isinstance(nus_0,int) or isinstance(nus_0,float)):
                nus_0 = nus_0*np.ones(D)
            if alphas_0 is not None and \
                    (isinstance(alphas_0,int) or isinstance(alphas_0,float)):
                alphas_0 = alphas_0*np.ones(D)
            if betas_0 is not None and \
                    (isinstance(betas_0,int) or isinstance(betas_0,float)):
                betas_0 = betas_0*np.ones(D)

        # Mean-field parameters start out equal to the prior.
        self.mu_0 = self.mf_mu = mu_0
        self.nus_0 = self.mf_nus = nus_0
        self.alphas_0 = self.mf_alphas = alphas_0
        self.betas_0 = self.mf_betas = betas_0

        self.mu = mu
        self.sigmas = sigmas

        assert self.mu is None or (isinstance(self.mu,np.ndarray) and not isinstance(self.mu,np.ma.MaskedArray))
        assert self.sigmas is None or (isinstance(self.sigmas,np.ndarray) and not isinstance(self.sigmas,np.ma.MaskedArray))

        if mu is sigmas is None \
                and not any(_ is None for _ in (mu_0,nus_0,alphas_0,betas_0)):
            self.resample()  # intialize from prior

    ### the basics!

    @property
    def parameters(self):
        return self.mu, self.sigmas

    @parameters.setter
    def parameters(self, mu_sigmas_tuple):
        (mu,sigmas) = mu_sigmas_tuple
        self.mu, self.sigmas = mu, sigmas

    @property
    def sigma(self):
        # Full covariance view of the per-coordinate variances.
        return np.diag(self.sigmas)

    @sigma.setter
    def sigma(self,val):
        # Accept either a variance vector or a (diagonal) matrix.
        val = np.array(val)
        assert val.ndim in (1,2)
        if val.ndim == 1:
            self.sigmas = val
        else:
            self.sigmas = np.diag(val)

    @property
    def hypparams(self):
        return dict(mu_0=self.mu_0,nus_0=self.nus_0,
                alphas_0=self.alphas_0,betas_0=self.betas_0)

    def rvs(self,size=None):
        size = np.array(size,ndmin=1)
        return np.sqrt(self.sigmas)*\
            np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu

    def log_likelihood(self,x,temperature=1.):
        # Diagonal-covariance Gaussian log-density; `temperature` scales the
        # variances (used by the Tempering mixin).
        mu, sigmas, D = self.mu, self.sigmas * temperature, self.mu.shape[0]
        x = np.reshape(x,(-1,D))
        Js = -1./(2*sigmas)
        return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
            + (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()

    ### posterior updating stuff

    @property
    def natural_hypparam(self):
        return self._standard_to_natural(self.alphas_0,self.betas_0,self.mu_0,self.nus_0)

    @natural_hypparam.setter
    def natural_hypparam(self,natparam):
        self.alphas_0, self.betas_0, self.mu_0, self.nus_0 = \
            self._natural_to_standard(natparam)

    def _standard_to_natural(self,alphas,betas,mu,nus):
        # Rows line up with the statistics vector [sum x^2, sum x, n, n].
        return np.array([2*betas + nus * mu**2, nus*mu, nus, 2*alphas])

    def _natural_to_standard(self,natparam):
        nus = natparam[2]
        mu = natparam[1] / nus
        alphas = natparam[3]/2.
        betas = (natparam[0] - nus*mu**2) / 2.
        return alphas, betas, mu, nus

    def _get_statistics(self,data):
        # Per-coordinate sufficient statistics: [sum x^2, sum x, n, n].
        if isinstance(data,np.ndarray) and data.shape[0] > 0:
            data = data[gi(data)]  # gi() presumably drops NaN rows (util.stats)
            ns = np.repeat(*data.shape)  # length-D vector filled with the row count
            return np.array([
                np.einsum('ni,ni->i',data,data),
                np.einsum('ni->i',data),
                ns,
                ns,
                ])
        else:
            return sum((self._get_statistics(d) for d in data), self._empty_stats())

    def _get_weighted_statistics(self,data,weights):
        # Weighted variant of _get_statistics, same 4-row layout.
        if isinstance(data,np.ndarray):
            idx = ~np.isnan(data).any(1)
            data = data[idx]
            weights = weights[idx]
            assert data.ndim == 2 and weights.ndim == 1 \
                and data.shape[0] == weights.shape[0]
            neff = np.repeat(weights.sum(),data.shape[1])
            return np.array([weights.dot(data**2), weights.dot(data), neff, neff])
        else:
            return sum(
                (self._get_weighted_statistics(d,w) for d, w in zip(data,weights)),
                self._empty_stats())

    def _empty_stats(self):
        return np.zeros_like(self.natural_hypparam)

    ### Gibbs sampling

    def resample(self,data=[],temperature=1.,stats=None):
        stats = self._get_statistics(data) if stats is None else stats

        alphas_n, betas_n, mu_n, nus_n = self._natural_to_standard(
            self.natural_hypparam + stats / temperature)

        D = mu_n.shape[0]
        self.sigmas = 1/np.random.gamma(alphas_n,scale=1/betas_n)
        self.mu = np.sqrt(self.sigmas/nus_n)*np.random.randn(D) + mu_n

        assert not np.isnan(self.mu).any()
        assert not np.isnan(self.sigmas).any()

        # NOTE: next line is to use Gibbs sampling to initialize mean field
        self.mf_mu = self.mu

        assert self.sigmas.ndim == 1
        return self

    def copy_sample(self):
        new = copy.copy(self)
        new.mu = self.mu.copy()
        new.sigmas = self.sigmas.copy()
        return new

    ### max likelihood

    def max_likelihood(self,data,weights=None):
        """Set mu and sigmas to their (weighted) maximum likelihood estimates.

        NOTE: fixed to consume the 4-row statistics array
        [sum x^2, sum x, n, n] that _get_statistics and
        _get_weighted_statistics actually return. The previous version
        unpacked three values (and referenced the nonexistent
        _get_weighted_statistics_old), so it always raised.
        """
        if weights is None:
            sumsq, sumx, n, _ = self._get_statistics(data)
        else:
            sumsq, sumx, n, _ = self._get_weighted_statistics(data,weights)

        self.mu = sumx / n
        # Per-coordinate variance: E[x^2] - E[x]^2.
        self.sigmas = sumsq / n - self.mu**2
        return self

    ### Mean Field

    @property
    def mf_natural_hypparam(self):
        return self._standard_to_natural(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)

    @mf_natural_hypparam.setter
    def mf_natural_hypparam(self,natparam):
        self.mf_alphas, self.mf_betas, self.mf_mu, self.mf_nus = \
            self._natural_to_standard(natparam)
        # NOTE: this part is for plotting
        self.mu = self.mf_mu
        self.sigmas = np.where(self.mf_alphas > 1,self.mf_betas / (self.mf_alphas - 1),100000)

    def meanfieldupdate(self,data,weights):
        self.mf_natural_hypparam = \
            self.natural_hypparam + self._get_weighted_statistics(data,weights)

    def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
        # Stochastic variational (natural-gradient) step.
        self.mf_natural_hypparam = \
            (1-stepsize) * self.mf_natural_hypparam + stepsize * (
                self.natural_hypparam
                + 1./minibatchfrac * self._get_weighted_statistics(data,weights))

    def get_vlb(self):
        # Variational lower bound: linear term in the expected statistics
        # minus the difference of log-normalizers.
        natparam_diff = self.natural_hypparam - self.mf_natural_hypparam
        expected_stats = self._expected_statistics(
            self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
        linear_term = sum(v1.dot(v2) for v1, v2 in zip(natparam_diff, expected_stats))

        normalizer_term = \
            self._log_Z(self.alphas_0,self.betas_0,self.mu_0,self.nus_0) \
            - self._log_Z(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)

        return linear_term - normalizer_term - len(self.mf_mu)/2. * np.log(2*np.pi)

    def expected_log_likelihood(self,x):
        x = np.atleast_2d(x).reshape((-1,len(self.mf_mu)))
        a,b,c,d = self._expected_statistics(
            self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
        return (x**2).dot(a) + x.dot(b) + c.sum() + d.sum() \
            - len(self.mf_mu)/2. * np.log(2*np.pi)

    def _expected_statistics(self,alphas,betas,mu,nus):
        # Expected natural statistics under the variational posterior;
        # uses E[1/sigma] = alpha/beta and E[log sigma] = log(beta) - digamma(alpha).
        return np.array([
            -1./2 * alphas/betas,
            mu * alphas/betas,
            -1./2 * (1./nus + mu**2 * alphas/betas),
            -1./2 * (np.log(betas) - special.digamma(alphas))])

    def _log_Z(self,alphas,betas,mu,nus):
        # Log-normalizer of the per-coordinate normal-inverse-gamma factors.
        return (special.gammaln(alphas) - alphas*np.log(betas) - 1./2*np.log(nus)).sum()
# TODO meanfield
class DiagonalGaussianNonconjNIG(_GaussianBase,GibbsSampling):
    '''
    Product of normal priors over mu and product of gamma priors over sigmas.
    Note that while the conjugate prior in DiagonalGaussian is of the form
    p(mu,sigmas), this prior is of the form p(mu)p(sigmas). Therefore its
    resample() update has to perform inner iterations.

    The prior follows
        mu     ~ N(mu_0,diag(sigmas_0))
        sigmas ~ InvGamma(alpha_0,beta_0) iid
    '''

    def __init__(self,mu=None,sigmas=None,mu_0=None,sigmas_0=None,alpha_0=None,beta_0=None,
            niter=20):
        self.mu_0, self.sigmas_0 = mu_0, sigmas_0
        self.alpha_0, self.beta_0 = alpha_0, beta_0

        self.niter = niter  # inner Gibbs sweeps per resample() call

        if None in (mu,sigmas):
            self.resample()  # initialize from the prior
        else:
            self.mu, self.sigmas = mu, sigmas

    @property
    def hypparams(self):
        return dict(mu_0=self.mu_0,sigmas_0=self.sigmas_0,alpha_0=self.alpha_0,beta_0=self.beta_0)

    # TODO next three methods are copied from DiagonalGaussian, factor them out

    @property
    def sigma(self):
        # Full covariance view of the per-coordinate variances.
        return np.diag(self.sigmas)

    def rvs(self,size=None):
        size = np.array(size,ndmin=1)
        return np.sqrt(self.sigmas)*\
            np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu

    def log_likelihood(self,x):
        # Diagonal-covariance Gaussian log-density.
        mu, sigmas, D = self.mu, self.sigmas, self.mu.shape[0]
        x = np.reshape(x,(-1,D))
        Js = -1./(2*sigmas)
        return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
            + (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()

    def resample(self,data=[]):
        n, y, ysq = self._get_statistics(data)
        if n == 0:
            # No data: draw directly from the prior.
            self.mu = np.sqrt(self.sigmas_0) * np.random.randn(self.mu_0.shape[0]) + self.mu_0
            self.sigmas = 1./np.random.gamma(self.alpha_0,scale=1./self.beta_0)
        else:
            # The factored prior is not conjugate, so alternate between the
            # two conditionals (inner blocked-Gibbs loop).
            for itr in xrange(self.niter):
                # mu | sigmas, data
                sigmas_n = 1./(1./self.sigmas_0 + n / self.sigmas)
                mu_n = (self.mu_0 / self.sigmas_0 + y / self.sigmas) * sigmas_n
                self.mu = np.sqrt(sigmas_n) * np.random.randn(mu_n.shape[0]) + mu_n

                # sigmas | mu, data
                alphas_n = self.alpha_0 + 1./2*n
                betas_n = self.beta_0 + 1./2*(ysq + n*self.mu**2 - 2*self.mu*y)
                self.sigmas = 1./np.random.gamma(alphas_n,scale=1./betas_n)
        return self

    def _get_statistics(self,data):
        # Per-coordinate [count, sum x, sum x^2] as an object array so that
        # statistics from multiple arrays can be summed elementwise.
        # TODO dont forget to handle nans
        assert isinstance(data,(list,np.ndarray)) and not isinstance(data,np.ma.MaskedArray)
        if isinstance(data,np.ndarray):
            data = data[gi(data)]  # gi() presumably drops NaN rows (util.stats)
            n = data.shape[0]
            y = np.einsum('ni->i',data)
            ysq = np.einsum('ni,ni->i',data,data)
            return np.array([n,y,ysq],dtype=np.object)
        else:
            return sum((self._get_statistics(d) for d in data),self._empty_stats)

    @property
    def _empty_stats(self):
        return np.array([0.,np.zeros_like(self.mu_0),np.zeros_like(self.mu_0)],
                dtype=np.object)
# TODO collapsed, meanfield, max_likelihood
class IsotropicGaussian(GibbsSampling):
    '''
    Normal-Inverse-Gamma prior over mu (mean vector) and sigma (scalar
    variance). Essentially, all coordinates of all observations inform the
    variance.

    The prior follows
        sigma      ~ InvGamma(alpha_0,beta_0)
        mu | sigma ~ N(mu_0,sigma/nu_0 * I)
    '''

    def __init__(self,mu=None,sigma=None,mu_0=None,nu_0=None,alpha_0=None,beta_0=None):
        self.mu = mu
        self.sigma = sigma

        self.mu_0 = mu_0
        self.nu_0 = nu_0
        self.alpha_0 = alpha_0
        self.beta_0 = beta_0

        if mu is sigma is None and not any(_ is None for _ in (mu_0,nu_0,alpha_0,beta_0)):
            self.resample()  # intialize from prior

    @property
    def hypparams(self):
        return dict(mu_0=self.mu_0,nu_0=self.nu_0,alpha_0=self.alpha_0,beta_0=self.beta_0)

    def rvs(self,size=None):
        # NOTE: fixed to accept None or an int for size; the previous code
        # called tuple(size) unconditionally, which raised TypeError for the
        # default size=None and for integer sizes. Tuple sizes behave as before.
        if size is None:
            size = ()
        elif not isinstance(size,tuple):
            size = (size,)
        return np.sqrt(self.sigma)*np.random.normal(size=size+self.mu.shape) + self.mu

    def log_likelihood(self,x):
        # Isotropic Gaussian log-density evaluated row-wise.
        mu, sigma, D = self.mu, self.sigma, self.mu.shape[0]
        x = np.reshape(x,(-1,D))
        return (-0.5*((x-mu)**2).sum(1)/sigma - D*np.log(np.sqrt(2*np.pi*sigma)))

    def _posterior_hypparams(self,n,xbar,sumsq):
        # Conjugate NIG update; every coordinate of every observation
        # contributes to the scalar variance, hence the D*n counts.
        mu_0, nu_0, alpha_0, beta_0 = self.mu_0, self.nu_0, self.alpha_0, self.beta_0
        D = mu_0.shape[0]
        if n > 0:
            nu_n = D*n + nu_0
            alpha_n = alpha_0 + D*n/2
            beta_n = beta_0 + 1/2*sumsq + (n*D*nu_0)/(n*D+nu_0) * 1/2 * ((xbar - mu_0)**2).sum()
            mu_n = (n*xbar + nu_0*mu_0)/(n+nu_0)

            return mu_n, nu_n, alpha_n, beta_n
        else:
            return mu_0, nu_0, alpha_0, beta_0

    ### Gibbs sampling

    def resample(self,data=[]):
        mu_n, nu_n, alpha_n, beta_n = self._posterior_hypparams(
            *self._get_statistics(data, D=self.mu_0.shape[0]))
        D = mu_n.shape[0]
        self.sigma = 1/np.random.gamma(alpha_n,scale=1/beta_n)
        self.mu = np.sqrt(self.sigma/nu_n)*np.random.randn(D)+mu_n
        return self

    def _get_statistics(self,data, D=None):
        """Return (count, mean vector, total centered sum of squares)."""
        n = getdatasize(data)
        if n > 0:
            D = D if D else getdatadimension(data)
            if isinstance(data,np.ndarray):
                assert (data.ndim == 1 and data.shape == (D,)) \
                    or (data.ndim == 2 and data.shape[1] == D)
                data = np.reshape(data,(-1,D))
                xbar = data.mean(0)
                sumsq = ((data-xbar)**2).sum()
            else:
                xbar = sum(np.reshape(d,(-1,D)).sum(0) for d in data) / n
                # NOTE: fixed -- the previous code reshaped `data` (the whole
                # list) instead of the loop variable `d`, so the centered
                # sum of squares was wrong for list-of-arrays input.
                sumsq = sum(((np.reshape(d,(-1,D)) - xbar)**2).sum() for d in data)
        else:
            xbar, sumsq = None, None
        return n, xbar, sumsq
class _ScalarGaussianBase(object):
    '''
    Shared functionality for scalar (1D) Gaussian distribution classes:
    sampling, log-likelihood evaluation, plotting, gathering (weighted)
    sufficient statistics, and maximum likelihood fitting. Subclasses are
    expected to maintain the parameters self.mu and self.sigmasq.
    '''
    @property
    def params(self):
        # current parameters as a dict
        return dict(mu=self.mu,sigmasq=self.sigmasq)

    def rvs(self,size=None):
        """Draw samples from N(mu, sigmasq); `size` is passed to numpy."""
        return np.sqrt(self.sigmasq)*np.random.normal(size=size)+self.mu

    def log_likelihood(self,x):
        """Return one Gaussian log-density per scalar observation in x."""
        x = np.reshape(x,(-1,1))
        return (-0.5*(x-self.mu)**2/self.sigmasq - np.log(np.sqrt(2*np.pi*self.sigmasq))).ravel()

    def __repr__(self):
        return self.__class__.__name__ + '(mu=%f,sigmasq=%f)' % (self.mu,self.sigmasq)

    def plot(self,data=None,indices=None,color='b',plot_params=True,label=None):
        """Scatter the data against `indices` and overlay the mean.

        NOTE(review): the `label` argument is currently unused -- confirm
        whether callers rely on it before removing.
        """
        import matplotlib.pyplot as plt
        data = np.concatenate(data) if data is not None else None
        indices = np.concatenate(indices) if indices is not None else None

        if data is not None:
            assert indices is not None
            plt.plot(indices,data,color=color,marker='x',linestyle='')

        if plot_params:
            assert indices is not None
            if len(indices) > 1:
                from util.general import rle
                # draw the mean as dashed segments over each run of
                # consecutive indices (run-length encode the index diffs)
                vals, lens = rle(np.diff(indices))
                starts = np.concatenate(((0,),lens.cumsum()[:-1]))
                for start, blocklen in zip(starts[vals == 1], lens[vals == 1]):
                    plt.plot(indices[start:start+blocklen],
                            np.repeat(self.mu,blocklen),color=color,linestyle='--')
            else:
                plt.plot(indices,[self.mu],color=color,marker='+')

    ### mostly shared statistics gathering

    def _get_statistics(self,data):
        """Sufficient statistics (n, ybar, sumsqc): count, sample mean and
        centered sum of squares. data may be an ndarray, a list of ndarrays,
        or a scalar; returns (0, None, None) when empty.
        """
        n = getdatasize(data)
        if n > 0:
            if isinstance(data,np.ndarray):
                ybar = data.mean()
                centered = data.ravel() - ybar
                sumsqc = centered.dot(centered)
            elif isinstance(data,list):
                ybar = sum(d.sum() for d in data)/n
                sumsqc = sum((d.ravel()-ybar).dot(d.ravel()-ybar) for d in data)
            else:
                # single scalar observation
                ybar = data
                sumsqc = 0
        else:
            ybar = None
            sumsqc = None

        return n, ybar, sumsqc

    def _get_weighted_statistics(self,data,weights):
        """Weighted sufficient statistics (neff, ybar, sumsqc); the mean and
        scatter are None when the effective count neff is numerically zero."""
        if isinstance(data,np.ndarray):
            neff = weights.sum()
            if neff > weps:
                ybar = weights.dot(data.ravel()) / neff
                centered = data.ravel() - ybar
                sumsqc = centered.dot(weights*centered)
            else:
                ybar = None
                sumsqc = None
        elif isinstance(data,list):
            neff = sum(w.sum() for w in weights)
            if neff > weps:
                ybar = sum(w.dot(d.ravel()) for d,w in zip(data,weights)) / neff
                sumsqc = sum((d.ravel()-ybar).dot(w*(d.ravel()-ybar))
                        for d,w in zip(data,weights))
            else:
                ybar = None
                sumsqc = None
        else:
            # single scalar observation with a scalar weight
            # BUGFIX: neff was never assigned on this branch, so the return
            # statement below raised NameError
            neff = weights
            ybar = data
            sumsqc = 0

        return neff, ybar, sumsqc

    ### max likelihood

    def max_likelihood(self,data,weights=None):
        """Set (mu, sigmasq) to their (weighted) maximum likelihood values.

        Degenerate fits (no data, or zero variance) set self.broken and park
        the parameters at sentinel values.
        """
        if weights is None:
            n, ybar, sumsqc = self._get_statistics(data)
        else:
            n, ybar, sumsqc = self._get_weighted_statistics(data,weights)

        if sumsqc > 0:
            self.mu = ybar
            self.sigmasq = sumsqc/n
        else:
            self.broken = True
            self.mu = 999999999.
            # BUGFIX: this previously assigned to a typo'd attribute
            # 'sigmsq', leaving the real parameter sigmasq unchanged
            self.sigmasq = 1.

        return self
class ScalarGaussianNIX(_ScalarGaussianBase, GibbsSampling, Collapsed):
    '''
    Conjugate Normal-(Scaled-)Inverse-ChiSquared prior. (Another parameterization is the
    Normal-Inverse-Gamma.)

    Hyperparameters:
        mu_0      : prior mean
        kappa_0   : pseudo-count (strength) of the prior on the mean
        sigmasq_0 : prior variance scale
        nu_0      : pseudo-count (strength) of the prior on the variance
    '''
    def __init__(self,mu=None,sigmasq=None,mu_0=None,kappa_0=None,sigmasq_0=None,nu_0=None):
        self.mu = mu
        self.sigmasq = sigmasq
        self.mu_0 = mu_0
        self.kappa_0 = kappa_0
        self.sigmasq_0 = sigmasq_0
        self.nu_0 = nu_0
        # no explicit parameters but a full set of hyperparameters:
        # initialize (mu, sigmasq) with a draw from the prior
        if mu is sigmasq is None \
                and not any(_ is None for _ in (mu_0,kappa_0,sigmasq_0,nu_0)):
            self.resample() # intialize from prior

    @property
    def hypparams(self):
        # prior hyperparameters as a dict
        return dict(mu_0=self.mu_0,kappa_0=self.kappa_0,
                sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)

    def _posterior_hypparams(self,n,ybar,sumsqc):
        """Conjugate NIX posterior update from the sufficient statistics
        (count n, sample mean ybar, centered sum of squares sumsqc).
        Returns the prior hyperparameters unchanged when there is no data.
        """
        mu_0, kappa_0, sigmasq_0, nu_0 = self.mu_0, self.kappa_0, self.sigmasq_0, self.nu_0
        if n > 0:
            kappa_n = kappa_0 + n
            mu_n = (kappa_0 * mu_0 + n * ybar) / kappa_n
            nu_n = nu_0 + n
            # posterior scale: prior scatter + data scatter + shrinkage term
            sigmasq_n = 1/nu_n * (nu_0 * sigmasq_0 + sumsqc + kappa_0 * n / (kappa_0 + n) * (ybar - mu_0)**2)
            return mu_n, kappa_n, sigmasq_n, nu_n
        else:
            return mu_0, kappa_0, sigmasq_0, nu_0

    ### Gibbs sampling

    def resample(self,data=[]):
        """Sample (sigmasq, mu) from the posterior given data (or prior)."""
        mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
        # sigmasq | data ~ ScaledInvChiSq(nu_n, sigmasq_n)
        self.sigmasq = nu_n * sigmasq_n / np.random.chisquare(nu_n)
        # mu | sigmasq, data ~ N(mu_n, sigmasq / kappa_n)
        self.mu = np.sqrt(self.sigmasq / kappa_n) * np.random.randn() + mu_n
        return self

    ### Collapsed

    def log_marginal_likelihood(self,data):
        """Closed-form log evidence p(data) under the NIX prior."""
        n = getdatasize(data)
        kappa_0, sigmasq_0, nu_0 = self.kappa_0, self.sigmasq_0, self.nu_0
        mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
        # the leading 0.5 distributes over every term inside the parentheses
        return special.gammaln(nu_n/2) - special.gammaln(nu_0/2) \
                + 0.5*(np.log(kappa_0) - np.log(kappa_n)
                       + nu_0 * (np.log(nu_0) + np.log(sigmasq_0))
                       - nu_n * (np.log(nu_n) + np.log(sigmasq_n))
                       - n*np.log(np.pi))

    def log_predictive_single(self,y,olddata):
        # mostly for testing or speed
        # posterior predictive is a Student-t with nu_n degrees of freedom
        mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(olddata))
        return stats.t.logpdf(y,nu_n,loc=mu_n,scale=np.sqrt((1+kappa_n)*sigmasq_n/kappa_n))
class ScalarGaussianNonconjNIX(_ScalarGaussianBase, GibbsSampling):
    '''
    Non-conjugate separate priors on mean and variance parameters, via
    mu ~ Normal(mu_0,tausq_0)
    sigmasq ~ (Scaled-)Inverse-ChiSquared(sigmasq_0,nu_0)

    Because the priors are not jointly conjugate, resampling alternates
    between the two conditional posteriors for `niter` sweeps.
    '''
    def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None,sigmasq_0=None,nu_0=None,
            niter=1):
        self.mu, self.sigmasq = mu, sigmasq
        self.mu_0, self.tausq_0 = mu_0, tausq_0
        self.sigmasq_0, self.nu_0 = sigmasq_0, nu_0
        # number of within-call Gibbs sweeps
        self.niter = niter
        # with a complete prior but no parameters, initialize from the prior
        if mu is sigmasq is None \
                and not any(_ is None for _ in (mu_0, tausq_0, sigmasq_0, nu_0)):
            self.resample() # intialize from prior

    @property
    def hypparams(self):
        return dict(mu_0=self.mu_0,tausq_0=self.tausq_0,
                sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)

    def resample(self,data=[],niter=None):
        """Alternate sampling mu | sigmasq, data and sigmasq | mu, data.

        With no data, draws each parameter independently from its prior.
        Returns self.
        """
        n = getdatasize(data)
        niter = self.niter if niter is None else niter
        if n > 0:
            data = flattendata(data)
            # gi(data) presumably masks out invalid entries (e.g. NaNs);
            # NOTE(review): n above counts all entries -- confirm this is
            # consistent with getdatasize's semantics
            datasum = data[gi(data)].sum()
            datasqsum = (data[gi(data)]**2).sum()
            nu_n = self.nu_0 + n
            for itr in range(niter):
                # resample mean: Gaussian conditional with precision-weighted
                # combination of the prior and the data
                tausq_n = 1/(1/self.tausq_0 + n/self.sigmasq)
                mu_n = tausq_n*(self.mu_0/self.tausq_0 + datasum/self.sigmasq)
                self.mu = np.sqrt(tausq_n)*np.random.normal() + mu_n
                # resample variance: scaled inverse chi-squared conditional
                sigmasq_n = (self.nu_0*self.sigmasq_0 + (datasqsum + n*self.mu**2-2*datasum*self.mu))/nu_n
                self.sigmasq = sigmasq_n*nu_n/np.random.chisquare(nu_n)
        else:
            self.mu = np.sqrt(self.tausq_0) * np.random.normal() + self.mu_0
            self.sigmasq = self.sigmasq_0*self.nu_0/np.random.chisquare(self.nu_0)
        return self
class ScalarGaussianNonconjNIG(_ScalarGaussianBase, MeanField, MeanFieldSVI):
    # NOTE: this is like ScalarGaussianNonconjNIX except the prior is in
    # natural coordinates: p(mu) is Gaussian with natural parameters
    # (h_0, J_0) and p(sigmasq) is inverse-gamma(alpha_0, beta_0).
    # The *_mf attributes parameterize the mean field factors q(mu), q(sigmasq).

    def __init__(self,h_0,J_0,alpha_0,beta_0,
            mu=None,sigmasq=None,
            h_mf=None,J_mf=None,alpha_mf=None,beta_mf=None,niter=1):
        self.h_0, self.J_0 = h_0, J_0
        self.alpha_0, self.beta_0 = alpha_0, beta_0

        # mean field factors default to (a perturbation of) the prior
        self.h_mf = h_mf if h_mf is not None else J_0 * np.random.normal(h_0/J_0,1./np.sqrt(J_0))
        self.J_mf = J_mf if J_mf is not None else J_0
        self.alpha_mf = alpha_mf if alpha_mf is not None else alpha_0
        self.beta_mf = beta_mf if beta_mf is not None else beta_0

        self.niter = niter

        # point estimates default to prior draws (sigmasq ~ InvGamma)
        self.mu = mu if mu is not None else np.random.normal(h_0/J_0,1./np.sqrt(J_0))
        self.sigmasq = sigmasq if sigmasq is not None else 1./np.random.gamma(alpha_0,1./beta_0)

    @property
    def hypparams(self):
        return dict(h_0=self.h_0,J_0=self.J_0,alpha_0=self.alpha_0,beta_0=self.beta_0)

    @property
    def _E_mu(self):
        # E[mu], E[mu**2]
        return self.h_mf / self.J_mf, 1./self.J_mf + (self.h_mf / self.J_mf)**2

    @property
    def _E_sigmasq(self):
        # E[1/sigmasq], E[ln sigmasq]
        return self.alpha_mf / self.beta_mf, \
                np.log(self.beta_mf) - special.digamma(self.alpha_mf)

    @property
    def natural_hypparam(self):
        return np.array([self.alpha_0,self.beta_0,self.h_0,self.J_0])

    @natural_hypparam.setter
    def natural_hypparam(self,natural_hypparam):
        self.alpha_0, self.beta_0, self.h_0, self.J_0 = natural_hypparam

    @property
    def mf_natural_hypparam(self):
        return np.array([self.alpha_mf,self.beta_mf,self.h_mf,self.J_mf])

    @mf_natural_hypparam.setter
    def mf_natural_hypparam(self,mf_natural_hypparam):
        self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf = mf_natural_hypparam
        # set point estimates of (mu, sigmasq) for plotting and stuff
        # (q(mu) mean and q(sigmasq) inverse-gamma mean)
        self.mu, self.sigmasq = self.h_mf / self.J_mf, self.beta_mf / (self.alpha_mf-1)

    def _resample_from_mf(self):
        """Draw a (mu, sigmasq) sample from the mean field factors."""
        # BUGFIX: q(sigmasq) is inverse-gamma (see the 1./np.random.gamma
        # prior draw in __init__ and the beta_mf/(alpha_mf-1) point estimate
        # above), so sample 1/Gamma(alpha_mf, 1/beta_mf); previously this
        # drew a plain gamma variate
        self.mu, self.sigmasq = np.random.normal(self.h_mf/self.J_mf,np.sqrt(1./self.J_mf)), \
                1./np.random.gamma(self.alpha_mf,1./self.beta_mf)
        return self

    def expected_log_likelihood(self,x):
        """E_q[log p(x | mu, sigmasq)] per observation."""
        (Emu, Emu2), (Esigmasqinv, Elnsigmasq) = self._E_mu, self._E_sigmasq
        return -1./2 * Esigmasqinv * (x**2 + Emu2 - 2*x*Emu) \
                - 1./2*Elnsigmasq - 1./2*np.log(2*np.pi)

    def get_vlb(self):
        """Variational lower bound contribution of the two factors."""
        # E[ln p(mu) / q(mu)] part
        h_0, J_0, J_mf = self.h_0, self.J_0, self.J_mf
        Emu, Emu2 = self._E_mu
        p_mu_avgengy = -1./2*J_0*Emu2 + h_0*Emu \
                - 1./2*(h_0**2/J_0) + 1./2*np.log(J_0) - 1./2*np.log(2*np.pi)
        q_mu_entropy = 1./2*np.log(2*np.pi*np.e/J_mf)

        # E[ln p(sigmasq) / q(sigmasq)] part
        alpha_0, beta_0, alpha_mf, beta_mf = \
                self.alpha_0, self.beta_0, self.alpha_mf, self.beta_mf
        (Esigmasqinv, Elnsigmasq) = self._E_sigmasq
        p_sigmasq_avgengy = (-alpha_0-1)*Elnsigmasq + (-beta_0)*Esigmasqinv \
                - (special.gammaln(alpha_0) - alpha_0*np.log(beta_0))
        q_sigmasq_entropy = alpha_mf + np.log(beta_mf) + special.gammaln(alpha_mf) \
                - (1+alpha_mf)*special.digamma(alpha_mf)

        return p_mu_avgengy + q_mu_entropy + p_sigmasq_avgengy + q_sigmasq_entropy

    def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
        # like meanfieldupdate except we step the factors simultaneously
        # NOTE: unlike the fully conjugate case, there are interaction terms, so
        # we work on the destructured pieces
        neff, y, ysq = self._get_weighted_statistics(data,weights)
        Emu, _ = self._E_mu
        Esigmasqinv, _ = self._E_sigmasq

        # form new natural hyperparameters as if doing a batch update
        # (1/minibatchfrac scales the minibatch statistics to the full data)
        alpha_new = self.alpha_0 + 1./minibatchfrac * 1./2*neff
        beta_new = self.beta_0 + 1./minibatchfrac * 1./2*(ysq + neff*Emu**2 - 2*Emu*y)

        h_new = self.h_0 + 1./minibatchfrac * Esigmasqinv * y
        J_new = self.J_0 + 1./minibatchfrac * Esigmasqinv * neff

        # take a step
        self.alpha_mf = (1-stepsize)*self.alpha_mf + stepsize*alpha_new
        self.beta_mf = (1-stepsize)*self.beta_mf + stepsize*beta_new

        self.h_mf = (1-stepsize)*self.h_mf + stepsize*h_new
        self.J_mf = (1-stepsize)*self.J_mf + stepsize*J_new

        # calling this setter will set point estimates for (mu,sigmasq) for
        # plotting and sampling and stuff
        self.mf_natural_hypparam = (self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)

        return self

    def meanfieldupdate(self,data,weights,niter=None):
        """Coordinate-ascent mean field: alternately update q(sigmasq) and
        q(mu); interaction terms prevent a single joint conjugate update."""
        niter = niter if niter is not None else self.niter
        neff, y, ysq = self._get_weighted_statistics(data,weights)
        # use a distinct loop variable (previously shadowed the niter
        # argument) and range for py2/py3 consistency with the rest of
        # the module
        for itr in range(niter):
            # update q(sigmasq)
            Emu, _ = self._E_mu
            self.alpha_mf = self.alpha_0 + 1./2*neff
            self.beta_mf = self.beta_0 + 1./2*(ysq + neff*Emu**2 - 2*Emu*y)

            # update q(mu)
            Esigmasqinv, _ = self._E_sigmasq
            self.h_mf = self.h_0 + Esigmasqinv * y
            self.J_mf = self.J_0 + Esigmasqinv * neff

        # calling this setter will set point estimates for (mu,sigmasq) for
        # plotting and sampling and stuff
        self.mf_natural_hypparam = \
                (self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)

        return self

    def _get_weighted_statistics(self,data,weights):
        """Weighted statistics [neff, sum(w*y), sum(w*y**2)] as an array;
        list inputs are reduced recursively element by element."""
        if isinstance(data,np.ndarray):
            neff = weights.sum()
            y = weights.dot(data)
            ysq = weights.dot(data**2)
        else:
            return sum(
                    self._get_weighted_statistics(d,w) for d,w in zip(data,weights))
        return np.array([neff,y,ysq])
class ScalarGaussianFixedvar(_ScalarGaussianBase, GibbsSampling):
    '''
    Scalar Gaussian with a known, fixed variance and a conjugate normal
    prior on the mean: mu ~ N(mu_0, tausq_0).
    '''
    def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None):
        self.mu = mu
        self.sigmasq = sigmasq
        self.mu_0 = mu_0
        self.tausq_0 = tausq_0
        # no mean given but a complete prior: initialize with a prior draw
        if mu is None and mu_0 is not None and tausq_0 is not None:
            self.resample() # initialize from prior

    @property
    def hypparams(self):
        return {'mu_0': self.mu_0, 'tausq_0': self.tausq_0}

    def _posterior_hypparams(self,n,xbar):
        """Posterior (mu_n, tausq_n) of the mean; the prior when n == 0."""
        if n == 0:
            return self.mu_0, self.tausq_0
        # precision-weighted combination of the prior and the data
        tausq_n = 1/(1/self.tausq_0 + n/self.sigmasq)
        mu_n = (self.mu_0/self.tausq_0 + n*xbar/self.sigmasq)*tausq_n
        return mu_n, tausq_n

    def resample(self,data=[]):
        """Gibbs step: draw mu ~ N(mu_n, tausq_n). Returns self."""
        mu_n, tausq_n = self._posterior_hypparams(*self._get_statistics(data))
        self.mu = mu_n + np.sqrt(tausq_n)*np.random.randn()
        return self

    def _get_statistics(self,data):
        # sufficient statistics are just the count and the sample mean
        n = getdatasize(data)
        if n == 0:
            return n, None
        if isinstance(data,np.ndarray):
            xbar = data.mean()
        else:
            xbar = sum(d.sum() for d in data)/n
        return n, xbar

    def _get_weighted_statistics(self,data,weights):
        # effective count and weighted mean; the mean is None when the
        # effective count is numerically zero
        if isinstance(data,np.ndarray):
            neff = weights.sum()
        else:
            neff = sum(w.sum() for w in weights)

        if neff <= weps:
            return neff, None
        if isinstance(data,np.ndarray):
            xbar = data.dot(weights) / neff
        else:
            xbar = sum(w.dot(d) for d,w in zip(data,weights)) / neff
        return neff, xbar

    def max_likelihood(self,data,weights=None):
        # the ML estimate of the mean is the (weighted) sample mean
        if weights is None:
            _, xbar = self._get_statistics(data)
        else:
            _, xbar = self._get_weighted_statistics(data,weights)
        self.mu = xbar
| mit |
brguez/TEIBA | src/python/germlineSrcElements_plotNbActiveElementsPerDonor.py | 1 | 3767 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
    """
    Display header

    Print a leading blank line, a 'YYYY-MM-DD HH:MM' timestamp and the
    message framed by '****' (Python 2 print statement).
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print '\n', timeInfo, "****", string, "****"
def subHeader(string):
    """
    Display subheader

    Print a timestamp and the message framed by '**'.
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, "**", string, "**"
def info(string):
    """
    Display basic information

    Print a timestamp followed by the message, with no framing.
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import operator
## Graphic style ##
sns.set_style("white")
sns.set_style("ticks")
## Get user's input ##
parser = argparse.ArgumentParser(description= "Plot the number of active source source elements per tumor genome across each tumor type")
parser.add_argument('activeSource', help='')
parser.add_argument('histologyOrder', help='File containing histology ordering. One row per histology')
parser.add_argument('palette', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
activeSource = args.activeSource
histologyOrder = args.histologyOrder
palette = args.palette
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "activeSource: ", activeSource
print "histologyOrder: ", histologyOrder
print "palette: ", palette
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
### 1. Read histology and create list with histology ordering
##############################################################
header("1. Read histology and create list with histology ordering")
histologyFile = open(histologyOrder, 'r')
histologyList = []
for line in histologyFile:
line = line.rstrip('\n')
line = line.split("\t")
histology = line[0]
histologyList.append(histology)
#### 2. Read palette file
##########################
# Initialize a dictionary with the following structure:
# - dict: key(tumor_histology) -> RGB_colorI
header("2. Read palette file")
paletteFile = open(palette, 'r')
colorHistologyDict = {}
for line in paletteFile:
# Skip header
if not line.startswith("#"):
line = line.rstrip('\n')
line = line.split('\t')
tumorType = line[0]
rgbColor = line[1]
colorHistologyDict[tumorType] = rgbColor
#### 3. Load number of active source elements into a dataframe
################################################################
header("3. Load number of active source elements per donor into a dataframe")
activeSourceDf = pd.read_csv(activeSource, header=0, index_col=0, sep='\t')
#### 4. Make the strip plot
############################
header("4. Make the strip plot")
fig = plt.figure(figsize=(10,4))
ax = sns.stripplot(x='tumorHistology', y='nbActiveSrc', data=activeSourceDf, size=6, edgecolor="black", linewidth=0.5, alpha=1, jitter=0.25, palette=colorHistologyDict, order=histologyList)
### Axis labels
ax.set_xlabel('')
ax.set_ylabel('Active source elements / sample')
# turn the axis labels
for item in ax.get_yticklabels():
item.set_rotation(0)
for item in ax.get_xticklabels():
item.set_rotation(90)
## Y ticks
ax.set(yticks=np.arange(0,26,2))
## Save figure
fileName = outDir + "/nbActive_srcElements_perDonor_striplot.pdf"
plt.savefig(fileName)
fileName = outDir + "/nbActive_srcElements_perDonor_striplot.svg"
plt.savefig(fileName)
#### End
header("FINISH!!")
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.1/examples/legacy_contact_binary.py | 1 | 3134 | #!/usr/bin/env python
# coding: utf-8
# Comparing Contacts Binaries in PHOEBE 2 vs PHOEBE Legacy
# ============================
#
# **NOTE**: PHOEBE 1.0 legacy is an alternate backend and is not installed with PHOEBE 2. In order to run this backend, you'll need to have [PHOEBE 1.0](https://phoebe-project.org/1.0) installed and manually install the python wrappers in the `phoebe-py` directory.
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
# NOTE: this file was exported from a Jupyter notebook; the get_ipython()
# calls below (shell escapes and line magics) only work inside IPython.
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')

# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')

# In[2]:

import phoebe
from phoebe import u
import numpy as np
import matplotlib.pyplot as plt

logger = phoebe.logger()

# contact-binary system with mass ratio q = 0.7
b = phoebe.default_binary(contact_binary=True)
b['q'] = 0.7

# Adding Datasets and Compute Options
# --------------------

# In[3]:

b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')
b.add_dataset('rv', times=np.linspace(0,1,101), dataset='rv01')

# Now we add compute options for the 'legacy' backend.

# In[4]:

b.add_compute('legacy')

# Let's use the external atmospheres available for both phoebe1 and phoebe2

# In[5]:

b.set_value_all('atm', 'extern_planckint')

# Set value of gridsize for the trapezoidal (WD) mesh.

# In[6]:

b.set_value_all('gridsize', 30)

# Let's also disable other special effect such as heating, gravity, and light-time effects.

# In[7]:

b.set_value_all('ld_func', 'logarithmic')
b.set_value_all('ld_coeffs', [0.0, 0.0])
b.set_value_all('rv_grav', False)
b.set_value_all('ltte', False)

# Finally, let's compute our models

# In[8]:

b.run_compute(kind='phoebe', model='phoebe2model', irrad_method='none')

# In[9]:

b.run_compute(kind='legacy', model='phoebe1model', irrad_method='none')

# Plotting
# -------------------------

# ### Light Curve

# In[10]:

afig, mplfig = b.filter(dataset='lc01').plot(c={'phoebe2model': 'g', 'phoebe1model': 'r'}, linestyle='solid',
                                             legend=True, show=True)

# Now let's plot the residuals between these two models

# In[11]:

artist, = plt.plot(b.get_value('fluxes@lc01@phoebe2model') - b.get_value('fluxes@lc01@phoebe1model'), 'g-')
artist = plt.axhline(0.0, linestyle='dashed', color='k')

# ### RVs

# In[12]:

afig, mplfig = b['rv01'].plot(c={'phoebe2model': 'g', 'phoebe1model': 'r'}, linestyle='solid',
                              legend=True, show=True)

# In[13]:

# per-component RV residuals between the two backends
artist, = plt.plot(b.get_value('rvs@primary@phoebe2model', ) - b.get_value('rvs@primary@phoebe1model'), color='g', ls=':')
artist, = plt.plot(b.get_value('rvs@secondary@phoebe2model') - b.get_value('rvs@secondary@phoebe1model'), color='g', ls='-.')
artist = plt.axhline(0.0, linestyle='dashed', color='k')

ylim = plt.ylim(-0.3, 0.3)

# In[ ]:
| gpl-3.0 |
mlucchini/keras-playground | networks/mnist-digit-recognition-perceptron-network.py | 1 | 1742 | from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.optimizers import SGD
from keras.utils import np_utils
import matplotlib.pyplot as plt
import numpy
numpy.random.seed(7)  # fix the RNG seed so weight init / shuffling are reproducible
def plot_image(plotArgs, x, index):
    """Draw image x[index] in grayscale on the subplot selected by plotArgs
    (a matplotlib 3-digit subplot code such as 221)."""
    plt.subplot(plotArgs)
    plt.imshow(x[index], cmap=plt.get_cmap('gray'))
def plot_first_images(x):
    """Show the first four images of x in a 2x2 grid, then display it."""
    for offset, position in enumerate((221, 222, 223, 224)):
        plot_image(position, x, offset)
    plt.show()
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()

# plot_first_images(X_train)

print("Shape of images before transformation: %s" % (X_train[0].shape,))
# flatten each 28x28 image into a single feature vector of 784 pixels
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
print("Shape of images after transformation: %s" % (X_train[0].shape,))

# scale pixel intensities from [0, 255] to [0, 1]
X_train /= 255
X_test /= 255

# one-hot encode the digit labels (10 classes)
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
num_classes = Y_test.shape[1]
def baseline_model():
    """Build and compile a single-hidden-layer perceptron for MNIST.

    Architecture: 784 inputs -> 1000 ReLU units -> 10-way softmax, trained
    with SGD (Nesterov momentum) on categorical cross-entropy.
    Uses the old Keras 1.x API (``init=`` keyword).
    """
    sgd = SGD(lr=0.006, momentum=0.9, nesterov=True)
    model = Sequential()
    model.add(Dense(1000, input_dim=num_pixels, init='uniform', activation='relu'))
    model.add(Dense(num_classes, init='uniform', activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
model = baseline_model()
# nb_epoch / verbose=2: Keras 1.x API; prints one summary line per epoch
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), nb_epoch=15, batch_size=128, verbose=2)
# scores = [loss, accuracy]; report the classification error in percent
scores = model.evaluate(X_test, Y_test, verbose=0)
print("Baseline error: %.2f%%" % (100 - scores[1] * 100))
| gpl-3.0 |
marcocaccin/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data

    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.

    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.

    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    # PERF: the empirical percentiles cover all columns at once and do not
    # depend on the loop variable; compute them once instead of per column
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)

    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution use unique vals
            axis = uniques
        else:
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)

    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
        computed (size should be smaller than 3 for visual renderings).

    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
        must be specified).

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the ``grid``. Only if ``X`` is not None.

    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.

    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # validate the model: must be a fitted gradient boosting ensemble
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # exactly one of `grid` and `X` must be given
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')

    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()

    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))

    if X is not None:
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)

    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]

    # accumulate the tree-level partial dependence over every boosting stage
    # (n_trees_per_stage > 1 only for multi-class models)
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            # Cython helper: adds this tree's (learning-rate weighted)
            # contribution into pdp[k] in place
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])

    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.

    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.

    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.

    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.

    n_cols : int
        The number of columns in the grid plot (default: 3).

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.

    grid_resolution : int, default=100
        The number of equally spaced points on the axes.

    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.

    verbose : int
        Verbose output during PD computations. Defaults to 0.

    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.

    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.

    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.

    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.

    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported lazily so the module can be used without it
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        # map a feature name to its integer index; ints pass through
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    # resolve feature indices to display names for axis labels
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)

    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)

    # get global min and max values of PD grouped by plot type
    # (so all one-way plots share a y-range and all two-way plots share
    # a contour range)
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            # one-way PDP: simple line plot
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| bsd-3-clause |
0todd0000/spm1d | spm1d/_plot.py | 1 | 10320 |
'''
This module contains classes for low-level plotting.
Users should access plotting functions through spm1d.plot (not spm1d._plot).
'''
# Copyright (C) 2016 Todd Pataky
# updated (2016/10/01) todd
from copy import copy,deepcopy
import numpy as np
from scipy import ndimage
import matplotlib
from matplotlib import pyplot, cm as colormaps
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
eps = np.finfo(float).eps #smallest float, used to avoid divide-by-zero errors
def p2string(p):
    """Format a probability value for display in plot annotations.

    Very small probabilities are reported as an inequality rather than
    a (misleadingly precise) zero-padded number.
    """
    if p < 0.0005:
        return 'p < 0.001'
    return 'p = %.03f' % p
class DataPlotter(object):
    """Base plotter wrapping a matplotlib axes for 1D continuum data.

    Provides convenience methods for plotting curves, filled clouds
    (e.g. SD regions), datum lines, error bars and regions of interest.
    """

    def __init__(self, ax=None):
        """
        Parameters
        ----------
        ax : matplotlib axes or None
            Target axes; the current pyplot axes is used when None.
        """
        self.ax = self._gca(ax)
        self.x = None  # continuum x coordinates (set via _set_x)

    @staticmethod
    def _gca(ax):
        # Fall back to the current pyplot axes when none is supplied.
        return pyplot.gca() if ax is None else ax

    def _set_axlim(self):
        """Set both x and y axis limits from the currently plotted data."""
        self._set_xlim()
        self._set_ylim()

    def _set_x(self, x, Q):
        # Default to node indices 0..Q-1 when no x values are supplied.
        self.x = np.arange(Q) if x is None else x

    def _set_xlim(self):
        pyplot.setp(self.ax, xlim=(self.x.min(), self.x.max()))

    def _set_ylim(self, pad=0.075):
        """Set y limits to cover all lines, collections and text artists.

        Parameters
        ----------
        pad : float
            Fraction of the total data range added as padding above and
            below the data extrema.
        """
        def minmax(x):
            return np.ma.min(x), np.ma.max(x)
        ax = self.ax
        ymin, ymax = +1e10, -1e10
        for line in ax.lines:
            y0, y1 = minmax(line.get_data()[1])
            ymin = min(y0, ymin)
            ymax = max(y1, ymax)
        for collection in ax.collections:
            datalim = collection.get_datalim(ax.transData)
            y0, y1 = minmax(np.asarray(datalim)[:, 1])
            ymin = min(y0, ymin)
            ymax = max(y1, ymax)
        for text in ax.texts:
            r = matplotlib.backend_bases.RendererBase()
            bbox = text.get_window_extent(r)
            y0, y1 = ax.transData.inverted().transform(bbox)[:, 1]
            ymin = min(y0, ymin)
            ymax = max(y1, ymax)
        # BUG FIX: the "pad" parameter was previously ignored (0.075 was
        # hard-coded below); behavior is unchanged for default calls.
        dy = pad * (ymax - ymin)
        ax.set_ylim(ymin - dy, ymax + dy)

    def plot(self, *args, **kwdargs):
        """Plot through matplotlib, prepending self.x when it is set."""
        if self.x is None:
            h = self.ax.plot(*args, **kwdargs)
        else:
            h = self.ax.plot(self.x, *args, **kwdargs)
        return h

    def plot_cloud(self, Y, facecolor='0.8', edgecolor='0.8', alpha=0.5,
                   edgelinestyle='-'):
        """Plot a filled polygon between two continua (e.g. an SD cloud).

        Parameters
        ----------
        Y : pair (y0, y1) of continua bounding the cloud.
        """
        ### create patches:
        y0, y1 = Y
        x, y0, y1 = self.x.tolist(), y0.tolist(), y1.tolist()
        # duplicate end nodes so the polygon edges are vertical:
        x = [x[0]] + x + [x[-1]]
        y0 = [y0[0]] + y0 + [y0[-1]]
        y1 = [y1[0]] + y1 + [y1[-1]]
        y1.reverse()
        ### concatenate into a single closed path:
        x1 = np.copy(x).tolist()
        x1.reverse()
        x, y = x + x1, y0 + y1
        patches = PatchCollection([Polygon(np.array([x, y]).T)],
                                  edgecolors=None)
        ### plot:
        self.ax.add_collection(patches)
        pyplot.setp(patches, facecolor=facecolor, edgecolor=edgecolor,
                    alpha=alpha, linestyle=edgelinestyle)
        return patches

    def plot_datum(self, y=0, color='k', linestyle=':'):
        """Plot a horizontal datum (reference) line."""
        self.ax.axhline(y, color=color, lw=1, linestyle=linestyle)

    def plot_errorbar(self, y, e, x=0, color=None, markersize=10,
                      linewidth=2, hbarw=0.1):
        """Plot a single point with a vertical error bar.

        Parameters
        ----------
        y, e : mean value and (symmetric) error magnitude
        hbarw : horizontal cap width as a fraction of e
        """
        self.ax.plot(x, y, 'o', markersize=markersize, color=color)
        self.ax.plot([x, x], [y - e, y + e], '-', color=color, lw=linewidth)
        w = hbarw * e
        self.ax.plot([x - w, x + w], [y - e] * 2, '-', color=color, lw=linewidth)
        self.ax.plot([x - w, x + w], [y + e] * 2, '-', color=color, lw=linewidth)

    def plot_roi(self, roi, ylim=None, facecolor='b', edgecolor='w', alpha=0.5):
        """Plot a (possibly multi-region) boolean ROI as shaded rectangles."""
        L, n = ndimage.label(roi)  # one label per contiguous region
        y0, y1 = self.ax.get_ylim() if ylim is None else ylim
        poly = []
        for i in range(n):
            x0, x1 = np.argwhere(L == (i + 1)).flatten()[[0, -1]]
            verts = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
            poly.append(Polygon(verts))
        pyplot.setp(poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)
        self.ax.add_collection(PatchCollection(poly, match_original=True))

    def set_ax_prop(self, *args, **kwdargs):
        """Forward arbitrary axes properties to pyplot.setp."""
        pyplot.setp(self.ax, *args, **kwdargs)
class SPMPlotter(DataPlotter):
    """Plotter for (raw, uninferenced) SPM objects: statistic continua."""

    def __init__(self, spm, ax=None):
        """
        Parameters
        ----------
        spm : an spm1d SPM object (provides Q, STAT, z, X, isparametric)
        ax : matplotlib axes or None (current axes used when None)
        """
        self.ax = self._gca(ax)
        self.x = np.arange(spm.Q)
        self.spm = spm
        self.z = None    # statistic continuum as a plain float array
        self.zma = None  # masked
        self.ismasked = None
        self.set_data()

    def _get_statstr(self):
        # Lower-case "t" is the conventional label for the t statistic.
        return 't' if self.spm.STAT == 'T' else self.spm.STAT

    def plot(self, color='k', lw=3, label=None):
        """Plot the statistic continuum with a zero datum line."""
        self.plot_field(color=color, lw=lw, label=label)
        self.plot_datum()
        self._set_ylim()

    def plot_design(self, factor_labels=None, fontsize=10):
        """Plot the design matrix as a grayscale image.

        Parameters
        ----------
        factor_labels : optional sequence of column labels
        fontsize : label font size
        """
        def scaleColumns(X):
            # Scale each column to [0, 1]; eps avoids divide-by-zero for
            # constant columns.
            mn, mx = np.min(X, axis=0), np.max(X, axis=0)
            Xs = (X - mn) / (mx - mn + eps)
            Xs[np.isnan(Xs)] = 1  # if the whole column is a constant
            return Xs
        X = self.spm.X
        vmin, vmax = None, None
        if np.all(X == 1):
            vmin, vmax = 0, 1
        self.ax.imshow(scaleColumns(X), cmap=colormaps.gray,
                       interpolation='nearest', vmin=vmin, vmax=vmax)
        # FIX (idiom): compare to None with "is not None", not "!= None".
        if factor_labels is not None:
            gs = X.shape
            tx = [self.ax.text(i, -0.05 * gs[0], label)
                  for i, label in enumerate(factor_labels)]
            pyplot.setp(tx, ha='center', va='bottom', color='k',
                        fontsize=fontsize)
        # NOTE(review): axis('normal') is removed in newer matplotlib;
        # kept unchanged here for parity with the supported version.
        self.ax.axis('normal')
        self.ax.axis('off')

    def plot_field(self, **kwdargs):
        """Plot the (possibly masked) statistic continuum itself.

        Missing style keywords are filled with sensible defaults.
        """
        keys = kwdargs.keys()
        if 'color' not in keys:
            kwdargs.update(dict(color='k'))
        if ('lw' not in keys) and ('linewidth' not in keys):
            kwdargs.update(dict(lw=2))
        if ('ls' not in keys) and ('linestyle' not in keys):
            kwdargs.update(dict(ls='-'))
        if 'marker' not in keys:
            kwdargs.update(dict(marker=' '))
        ax, x = self.ax, self.x
        if self.ismasked:
            ax.plot(x, self.zma, **kwdargs)
        else:
            ax.plot(x, self.z, **kwdargs)

    def plot_ylabel(self):
        """Label the y axis as SPM{stat} (or SnPM{stat} if nonparametric)."""
        spmlabel = 'SPM' if self.spm.isparametric else 'SnPM'
        label = '%s{%s}' % (spmlabel, self._get_statstr())
        self.ax.set_ylabel(label, size=16)

    def set_data(self):
        """Cache the statistic continuum, handling masked arrays (ROIs)."""
        if isinstance(self.spm.z, np.ma.MaskedArray):
            self.zma = deepcopy(self.spm.z)
            self.z = np.asarray(self.spm.z, dtype=float)
            self.ismasked = True
        else:
            self.z = self.spm.z
            self.ismasked = False
class SPMiPlotter(SPMPlotter):
    """Plotter for inference SPM objects (SPMi).

    Extends SPMPlotter with critical threshold lines, shaded
    suprathreshold cluster patches and p value annotations.
    """

    def __init__(self, spmi, ax=None):
        """spmi : an inference SPM object (has zstar, clusters, p, ...)."""
        super(SPMiPlotter, self).__init__(spmi, ax)

    def plot(self, color='k', lw=3, linestyle='-', marker=' ', facecolor='0.7', thresh_color='k', label=None):
        """Plot the continuum, datum, threshold(s) and cluster patches."""
        self.plot_field(color=color, lw=lw, linestyle=linestyle, marker=marker, label=label)
        self.plot_datum()
        self.plot_threshold(color=thresh_color)
        self.plot_cluster_patches(facecolor=facecolor)
        self._set_ylim()

    def plot_cluster_patches(self, facecolor='0.8'):
        """Shade suprathreshold clusters; wrapped clusters get two patches."""
        if self.spm.nClusters > 0:
            polyg = []
            for cluster in self.spm.clusters:
                x, z = cluster.get_patch_vertices()
                polyg.append(Polygon(np.array([x, z]).T))
                if cluster.iswrapped:
                    # second patch for the portion wrapped around the field end
                    x, z = cluster._other.get_patch_vertices()
                    polyg.append(Polygon(np.array([x, z]).T))
            patches = PatchCollection(polyg, edgecolors=None)
            self.ax.add_collection(patches)
            pyplot.setp(patches, facecolor=facecolor, edgecolor=facecolor)

    def plot_p_values(self, size=8, offsets=None, offset_all_clusters=None):
        """Annotate each cluster with its p value.

        offsets : per-cluster (dx, dy) text offsets
        offset_all_clusters : single (dx, dy) applied to every cluster
        """
        n = len(self.spm.p)
        if offsets is None:
            if offset_all_clusters is None:
                offsets = [(0, 0)] * n
            else:
                offsets = [offset_all_clusters] * n
        if len(offsets) < n:
            print('WARNING: there are fewer offsets than clusters. To set offsets for all clusters use the offset_all_clusters keyword.')
        h = []
        for cluster, offset in zip(self.spm.clusters, offsets):
            x, y = cluster.xy[0] if cluster.iswrapped else cluster.xy
            x += offset[0]
            y += offset[1]
            s = p2string(cluster.P)
            hh = self.ax.text(x, y, s, size=size, ha='center', va='center', bbox=dict(facecolor='w', alpha=0.3))
            h.append(hh)
        return h

    def plot_threshold(self, color='k'):
        """Plot critical threshold line(s).

        Handles: whole-field analysis (simple hlines), boolean ROIs
        (threshold masked outside the ROI) and directional ROIs
        (positive/negative thresholds only where the ROI sign demands).
        """
        ax, zs, spmi = self.ax, self.spm.zstar, self.spm
        if spmi.roi is None:
            h = [ax.axhline(zs)]
            if spmi.two_tailed:
                h.append(ax.axhline(-zs))
        else:
            if spmi.roi.dtype == bool:
                # threshold only inside the ROI:
                zz = np.ma.masked_array([zs] * spmi.Q, np.logical_not(spmi.roi))
                h = [ax.plot(self.x, zz)]
                if spmi.two_tailed:
                    h.append(ax.plot(self.x, -zz))
            else:  # directional ROI
                h = []
                if np.any(spmi.roi > 0):
                    zz = np.ma.masked_array([zs] * spmi.Q, np.logical_not(spmi.roi > 0))
                    h.append(ax.plot(self.x, zz))
                if np.any(spmi.roi < 0):
                    zz = np.ma.masked_array([-zs] * spmi.Q, np.logical_not(spmi.roi < 0))
                    h.append(ax.plot(self.x, zz))
        pyplot.setp(h, color=color, lw=1, linestyle='--')
        return h

    def plot_threshold_label(self, lower=False, pos=None, **kwdargs):
        """Annotate the threshold line with alpha and the critical value.

        lower : place the label at the negative threshold (two-tailed only)
        pos : explicit (x, y) position; auto-placed near the line if None
        """
        spmi = self.spm
        if pos is None:
            x0, x1 = self.x.min(), self.x.max()
            y0, y1 = self.ax.get_ylim()
            x = x0 + 0.4 * (x1 - x0)
            if lower and spmi.two_tailed:
                y = -spmi.zstar + 0.005 * (y1 - y0)
            else:
                y = spmi.zstar + 0.005 * (y1 - y0)
        else:
            x, y = pos
        if 'color' not in kwdargs.keys():
            kwdargs.update(dict(color='r'))
        s = r'$\alpha$=%.2f: $%s^*$=%.3f' % (spmi.alpha, self._get_statstr(), spmi.zstar)
        h = self.ax.text(x, y, s, **kwdargs)
        return h
def _legend_manual(ax, colors=None, labels=None, linestyles=None, markerfacecolors=None, linewidths=None, **kwdargs):
    """Build a legend from explicit style specifications.

    Dummy lines are plotted just outside the current axis limits (which
    are restored afterwards) so that the legend can show arbitrary
    color / linestyle / linewidth combinations.
    """
    nspec = len(colors)
    if linestyles is None:
        linestyles = ['-'] * nspec
    if linewidths is None:
        linewidths = [1] * nspec
    if markerfacecolors is None:
        markerfacecolors = colors
    xlim0, xlim1 = ax.get_xlim()
    ylim0, ylim1 = ax.get_ylim()
    handles = []
    for col, style, width, mfc in zip(colors, linestyles, linewidths,
                                      markerfacecolors):
        # plot off-screen so the dummy line never appears in the axes
        line = ax.plot([xlim1 + 1, xlim1 + 2, xlim1 + 3],
                       [ylim1 + 1, ylim1 + 2, ylim1 + 3],
                       style, color=col, linewidth=width,
                       markerfacecolor=mfc)[0]
        handles.append(line)
    ax.set_xlim(xlim0, xlim1)
    ax.set_ylim(ylim0, ylim1)
    return ax.legend(handles, labels, **kwdargs)
def _plot_F_list(FF, plot_threshold_label=True, plot_p_values=True, autoset_ylim=True):
    """Plot a list of F continua (e.g. ANOVA effects) in an m-by-m grid.

    Parameters
    ----------
    FF : iterable of SPM{F} objects; must expose "nFactors"
    plot_threshold_label : bool -- annotate panels with the critical threshold
    plot_p_values : bool -- annotate suprathreshold clusters with p values
    autoset_ylim : bool -- apply a common y range across all panels
    """
    m = FF.nFactors
    # mm = 2 if len(FF)<5 else 3
    AX = []
    for i, F in enumerate(FF):
        ax = pyplot.subplot(m, m, i + 1)
        F.plot(ax=ax)
        ax.set_title(F.effect)
        if F.isinference:
            if plot_threshold_label:
                F.plot_threshold_label(fontsize=8)
            if plot_p_values:
                F.plot_p_values(size=8)
        AX.append(ax)
        ### remove y label (all columns except the first):
        if i % m > 0:
            ax.set_ylabel('')
    ### set x ticklabels (hide on panels that are not in the bottom row):
    if len(FF) > 2:
        AXX = []
        if m == 2:
            AXX = [AX[0]]
        elif m == 3:
            if len(FF) == 7:  # 3-way ANOVA: 3 main effects + 4 interactions
                AXX = AX[:4]
        [ax.set_xticklabels([]) for ax in AXX]
    ### set y limits (common range across all panels):
    if autoset_ylim:
        ylim = np.array([ax.get_ylim() for ax in AX])
        ylim = ylim[:, 0].min(), ylim[:, 1].max()
        pyplot.setp(AX, ylim=ylim)
| gpl-3.0 |
ofgulban/segmentator | segmentator/gui_utils.py | 1 | 28321 | #!/usr/bin/env python
"""Functions covering the user interaction with the GUI."""
from __future__ import division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import segmentator.config as cfg
from segmentator.utils import map_2D_hist_to_ima
from nibabel import save, Nifti1Image
from scipy.ndimage.morphology import binary_erosion
class responsiveObj:
    """Stuff to interact in the user interface.

    Connects matplotlib mouse/keyboard events to the segmentator GUI:
    the 2D volume histogram panel (self.axes) and the image-slice
    browser panel (self.axes2). Required attributes (nii, orig,
    invHistVolume, sliceNr, segmType, figure handles, sliders, ...)
    are injected via keyword arguments in __init__.
    """

    def __init__(self, **kwargs):
        """Initialize variables used acros functions here."""
        if kwargs is not None:
            # inject all externally-provided state (figure, axes, data, ...)
            for key, value in kwargs.items():
                setattr(self, key, value)
        self.basename = self.nii.get_filename().split(os.extsep, 1)[0]
        self.press = None  # cursor position recorded on mouse press
        self.ctrlHeld = False
        self.labelNr = 0
        self.imaSlcMskSwitch, self.volHistHighlightSwitch = 0, 0
        self.TranspVal = 0.5  # transparency of the image-slice mask overlay
        self.nrExports = 0  # counter used to avoid overwriting exports
        self.borderSwitch = 0
        self.imaSlc = self.orig[:, :, self.sliceNr]  # selected slice
        self.cycleCount = 0
        # per-view [cycle, nr-of-90-degree-rotations] history
        self.cycRotHistory = [[0, 0], [0, 0], [0, 0]]
        self.highlights = [[], []]  # to hold image to histogram circles

    def remapMsks(self, remap_slice=True):
        """Update volume histogram to image mapping.

        Parameters
        ----------
        remap_slice : bool
            Do histogram to image mapping. Used to map displayed slice mask.

        """
        if self.segmType == 'main':
            self.volHistMask = self.sectorObj.binaryMask()
            self.volHistMask = self.lassoArr(self.volHistMask, self.idxLasso)
            self.volHistMaskH.set_data(self.volHistMask)
        elif self.segmType == 'ncut':
            self.labelContours()
            self.volHistMaskH.set_data(self.volHistMask)
            self.volHistMaskH.set_extent((0, self.nrBins, self.nrBins, 0))
        # histogram to image mapping
        if remap_slice:
            temp_slice = self.invHistVolume[:, :, self.sliceNr]
            image_slice_shape = self.invHistVolume[:, :, self.sliceNr].shape
            if cfg.discard_zeros:
                zmask = temp_slice != 0
                image_slice_mask = map_2D_hist_to_ima(temp_slice[zmask],
                                                      self.volHistMask)
                # reshape to image slice shape
                self.imaSlcMsk = np.zeros(image_slice_shape)
                self.imaSlcMsk[zmask] = image_slice_mask
            else:
                image_slice_mask = map_2D_hist_to_ima(temp_slice.flatten(),
                                                      self.volHistMask)
                # reshape to image slice shape
                self.imaSlcMsk = image_slice_mask.reshape(image_slice_shape)
            # for optional border visualization
            if self.borderSwitch == 1:
                self.imaSlcMsk = self.calcImaMaskBrd()

    def updatePanels(self, update_slice=True, update_rotation=False,
                     update_extent=False):
        """Update histogram and image panels."""
        if update_rotation:
            self.checkRotation()
        if update_extent:
            self.updateImaExtent()
        if update_slice:
            self.imaSlcH.set_data(self.imaSlc)
        # the mask overlay is refreshed unconditionally
        self.imaSlcMskH.set_data(self.imaSlcMsk)
        self.figure.canvas.draw()

    def connect(self):
        """Make the object responsive."""
        self.cidpress = self.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.cidrelease = self.figure.canvas.mpl_connect(
            'button_release_event', self.on_release)
        self.cidmotion = self.figure.canvas.mpl_connect(
            'motion_notify_event', self.on_motion)
        self.cidkeypress = self.figure.canvas.mpl_connect(
            'key_press_event', self.on_key_press)
        self.cidkeyrelease = self.figure.canvas.mpl_connect(
            'key_release_event', self.on_key_release)

    def on_key_press(self, event):
        """Determine what happens when a keyboard button is pressed."""
        # keys 1-5 control the overlay appearance in both segmentation modes
        if event.key == 'control':
            self.ctrlHeld = True
        elif event.key == '1':
            self.imaSlcMskIncr(-0.1)
        elif event.key == '2':
            self.imaSlcMskTransSwitch()
        elif event.key == '3':
            self.imaSlcMskIncr(0.1)
        elif event.key == '4':
            self.volHistHighlightTransSwitch()
        elif event.key == '5':
            self.borderSwitch = (self.borderSwitch + 1) % 2
            self.remapMsks()
            self.updatePanels(update_slice=False, update_rotation=True,
                              update_extent=True)
        # arrow keys scale/rotate the sector mask (main mode only)
        if self.segmType == 'main':
            if event.key == 'up':
                self.sectorObj.scale_r(1.05)
                self.remapMsks()
                self.updatePanels(update_slice=False, update_rotation=True,
                                  update_extent=False)
            elif event.key == 'down':
                self.sectorObj.scale_r(0.95)
                self.remapMsks()
                self.updatePanels(update_slice=False, update_rotation=True,
                                  update_extent=False)
            elif event.key == 'right':
                self.sectorObj.rotate(-10.0)
                self.remapMsks()
                self.updatePanels(update_slice=False, update_rotation=True,
                                  update_extent=False)
            elif event.key == 'left':
                self.sectorObj.rotate(10.0)
                self.remapMsks()
                self.updatePanels(update_slice=True, update_rotation=True,
                                  update_extent=False)
            else:
                return

    def on_key_release(self, event):
        """Determine what happens if key is released."""
        if event.key == 'control':
            self.ctrlHeld = False

    def findVoxInHist(self, event):
        """Find voxel's location in histogram."""
        self.press = event.xdata, event.ydata
        pixel_x = int(np.floor(event.xdata))
        pixel_y = int(np.floor(event.ydata))
        aoi = self.invHistVolume[:, :, self.sliceNr]  # array of interest
        # Check rotation; undo the displayed rotation before indexing
        cyc_rot = self.cycRotHistory[self.cycleCount][1]
        if cyc_rot == 1:  # 90
            aoi = np.rot90(aoi, axes=(0, 1))
        elif cyc_rot == 2:  # 180
            aoi = aoi[::-1, ::-1]
        elif cyc_rot == 3:  # 270
            aoi = np.rot90(aoi, axes=(1, 0))
        # Switch x and y voxel to get linear index since not Cartesian!!!
        pixelLin = aoi[pixel_y, pixel_x]
        # ind2sub
        # NOTE(review): "/" is true division under Python 3 and yields a
        # float bin coordinate; integer division (//) was presumably
        # intended here -- confirm against upstream.
        xpix = (pixelLin / self.nrBins)
        ypix = (pixelLin % self.nrBins)
        # Switch x and y for circle centre since back to Cartesian.
        circle_colors = [np.array([8, 48, 107, 255])/255,
                         np.array([33, 113, 181, 255])/255]
        self.highlights[0].append(plt.Circle((ypix, xpix), radius=1,
                                  edgecolor=None, color=circle_colors[0]))
        self.highlights[1].append(plt.Circle((ypix, xpix), radius=5,
                                  edgecolor=None, color=circle_colors[1]))
        self.axes.add_artist(self.highlights[0][-1])  # small circle
        self.axes.add_artist(self.highlights[1][-1])  # large circle
        self.figure.canvas.draw()

    def on_press(self, event):
        """Determine what happens if mouse button is clicked."""
        if self.segmType == 'main':
            if event.button == 1:  # left button
                if event.inaxes == self.axes:  # cursor in left plot (hist)
                    if self.ctrlHeld is False:  # ctrl no
                        contains = self.contains(event)
                        if not contains:
                            print('cursor outside circle mask')
                        if not contains:
                            return
                        # get sector centre x and y positions
                        x0 = self.sectorObj.cx
                        y0 = self.sectorObj.cy
                        # also get cursor x and y position and safe to press
                        self.press = x0, y0, event.xdata, event.ydata
                elif event.inaxes == self.axes2:  # cursor in right plot (brow)
                    self.findVoxInHist(event)
                else:
                    return
            elif event.button == 2:  # scroll button
                if event.inaxes != self.axes:  # outside axes
                    return
                # increase/decrease radius of the sector mask
                if self.ctrlHeld is False:  # ctrl no
                    self.sectorObj.scale_r(1.05)
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)
                elif self.ctrlHeld is True:  # ctrl yes
                    self.sectorObj.rotate(10.0)
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)
                else:
                    return
            elif event.button == 3:  # right button
                if event.inaxes != self.axes:
                    return
                # rotate the sector mask
                if self.ctrlHeld is False:  # ctrl no
                    self.sectorObj.scale_r(0.95)
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)
                elif self.ctrlHeld is True:  # ctrl yes
                    self.sectorObj.rotate(-10.0)
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)
                else:
                    return
        elif self.segmType == 'ncut':
            if event.button == 1:  # left mouse button
                if event.inaxes == self.axes:  # cursor in left plot (hist)
                    xbin = int(np.floor(event.xdata))
                    ybin = int(np.floor(event.ydata))
                    val = self.volHistMask[ybin][xbin]
                    # increment counterField for values in clicked subfield, at
                    # the first click the entire field constitutes the subfield
                    counter = int(self.counterField[ybin][xbin])
                    if counter+1 >= self.ima_ncut_labels.shape[2]:
                        print("already at maximum ncut dimension")
                        return
                    self.counterField[(
                        self.ima_ncut_labels[:, :, counter] ==
                        self.ima_ncut_labels[[ybin], [xbin], counter])] += 1
                    print("counter:" + str(counter+1))
                    # define arrays with old and new labels for later indexing
                    oLabels = self.ima_ncut_labels[:, :, counter]
                    nLabels = self.ima_ncut_labels[:, :, counter+1]
                    # replace old values with new values (in clicked subfield)
                    self.volHistMask[oLabels == val] = np.copy(
                        nLabels[oLabels == val])
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)
                elif event.inaxes == self.axes2:  # cursor in right plot (brow)
                    self.findVoxInHist(event)
                else:
                    return
            elif event.button == 3:  # right mouse button
                if event.inaxes == self.axes:  # cursor in left plot (hist)
                    xbin = int(np.floor(event.xdata))
                    ybin = int(np.floor(event.ydata))
                    val = self.volHistMask[ybin][xbin]
                    # fetch the slider value to get label nr
                    self.volHistMask[self.volHistMask == val] = \
                        np.copy(self.labelNr)
                    self.remapMsks()
                    self.updatePanels(update_slice=False, update_rotation=True,
                                      update_extent=False)

    def on_motion(self, event):
        """Determine what happens if mouse button moves."""
        if self.segmType == 'main':
            # ... button is pressed
            if self.press is None:
                return
            # ... cursor is in left plot
            if event.inaxes != self.axes:
                return
            # get former sector centre x and y positions,
            # cursor x and y positions
            x0, y0, xpress, ypress = self.press
            # calculate difference betw cursor pos on click
            # and new pos dur motion
            # switch x0 & y0 cause volHistMask not Cart
            dy = event.xdata - xpress
            dx = event.ydata - ypress
            # update x and y position of sector,
            # based on past motion of cursor
            self.sectorObj.set_x(x0 + dx)
            self.sectorObj.set_y(y0 + dy)
            # update masks
            self.remapMsks()
            self.updatePanels(update_slice=False, update_rotation=True,
                              update_extent=False)
        else:
            return

    def on_release(self, event):
        """Determine what happens if mouse button is released."""
        self.press = None
        # Remove highlight circle
        if self.highlights[1]:
            self.highlights[1][-1].set_visible(False)
            self.figure.canvas.draw()

    def disconnect(self):
        """Make the object unresponsive."""
        self.figure.canvas.mpl_disconnect(self.cidpress)
        self.figure.canvas.mpl_disconnect(self.cidrelease)
        self.figure.canvas.mpl_disconnect(self.cidmotion)

    def updateColorBar(self, val):
        """Update slider for scaling log colorbar in 2D hist."""
        histVMax = np.power(10, self.sHistC.val)
        plt.clim(vmax=histVMax)

    def updateSliceNr(self):
        """Update slice number and the selected slice."""
        # slider value is in [0, 1); scale it to a z index
        self.sliceNr = int(self.sSliceNr.val*self.orig.shape[2])
        self.imaSlc = self.orig[:, :, self.sliceNr]

    def updateImaBrowser(self, val):
        """Update image browse."""
        # scale slider value [0,1) to dimension index
        self.updateSliceNr()
        self.remapMsks()
        self.updatePanels(update_slice=True, update_rotation=True,
                          update_extent=True)

    def updateImaExtent(self):
        """Update both image and mask extent in image browser."""
        self.imaSlcH.set_extent((0, self.imaSlc.shape[1],
                                 self.imaSlc.shape[0], 0))
        self.imaSlcMskH.set_extent((0, self.imaSlc.shape[1],
                                    self.imaSlc.shape[0], 0))

    def cycleView(self, event):
        """Cycle through views."""
        self.cycleCount = (self.cycleCount + 1) % 3
        # transpose data
        self.orig = np.transpose(self.orig, (2, 0, 1))
        # transpose ima2volHistMap
        self.invHistVolume = np.transpose(self.invHistVolume, (2, 0, 1))
        # updates
        self.updateSliceNr()
        self.remapMsks()
        self.updatePanels(update_slice=True, update_rotation=True,
                          update_extent=True)

    def rotateIma90(self, axes=(0, 1)):
        """Rotate image slice 90 degrees."""
        self.imaSlc = np.rot90(self.imaSlc, axes=axes)
        self.imaSlcMsk = np.rot90(self.imaSlcMsk, axes=axes)

    def changeRotation(self, event):
        """Change rotation of image after clicking the button."""
        self.cycRotHistory[self.cycleCount][1] += 1
        self.cycRotHistory[self.cycleCount][1] %= 4  # wrap at 360 degrees
        self.rotateIma90()
        self.updatePanels(update_slice=True, update_rotation=False,
                          update_extent=True)

    def checkRotation(self):
        """Check rotation update if changed."""
        cyc_rot = self.cycRotHistory[self.cycleCount][1]
        if cyc_rot == 1:  # 90
            self.rotateIma90(axes=(0, 1))
        elif cyc_rot == 2:  # 180
            self.imaSlc = self.imaSlc[::-1, ::-1]
            self.imaSlcMsk = self.imaSlcMsk[::-1, ::-1]
        elif cyc_rot == 3:  # 270
            self.rotateIma90(axes=(1, 0))

    def exportNifti(self, event):
        """Export labels in the image browser as a nifti file."""
        print(" Exporting nifti file...")
        # put the permuted indices back to their original format
        cycBackPerm = (self.cycleCount, (self.cycleCount+1) % 3,
                       (self.cycleCount+2) % 3)
        # assign unique integers (for ncut labels)
        out_volHistMask = np.copy(self.volHistMask)
        labels = np.unique(self.volHistMask)
        intLabels = [i for i in range(labels.size)]
        for label, newLabel in zip(labels, intLabels):
            out_volHistMask[out_volHistMask == label] = intLabels[newLabel]
        # get 3D brain mask
        volume_image = np.transpose(self.invHistVolume, cycBackPerm)
        if cfg.discard_zeros:
            zmask = volume_image != 0
            temp_labeled_image = map_2D_hist_to_ima(volume_image[zmask],
                                                    out_volHistMask)
            out_nii = np.zeros(volume_image.shape)
            out_nii[zmask] = temp_labeled_image  # put back flat labels
        else:
            out_nii = map_2D_hist_to_ima(volume_image.flatten(),
                                         out_volHistMask)
            out_nii = out_nii.reshape(volume_image.shape)
        # save mask image as nii
        new_image = Nifti1Image(out_nii, header=self.nii.get_header(),
                                affine=self.nii.get_affine())
        # get new flex file name and check for overwriting
        labels_out = '{}_labels_{}.nii.gz'.format(
            self.basename, self.nrExports)
        while os.path.isfile(labels_out):
            self.nrExports += 1
            labels_out = '{}_labels_{}.nii.gz'.format(
                self.basename, self.nrExports)
        save(new_image, labels_out)
        print(" Saved as: {}".format(labels_out))

    def clearOverlays(self):
        """Clear overlaid items such as circle highlights."""
        if self.highlights[0]:
            # NOTE: set comprehensions used purely for their side effects
            {h.remove() for h in self.highlights[0]}
            {h.remove() for h in self.highlights[1]}
            # NOTE(review): highlights[1] is not re-emptied here --
            # confirm intentional.
            self.highlights[0] = []

    def resetGlobal(self, event):
        """Reset stuff."""
        # reset highlights
        self.clearOverlays()
        # reset color bar
        self.sHistC.reset()
        # reset transparency
        self.TranspVal = 0.5
        if self.segmType == 'main':
            if self.lassoSwitchCount == 1:  # reset only lasso drawing
                self.idxLasso = np.zeros(self.nrBins*self.nrBins, dtype=bool)
            else:
                # reset theta sliders
                self.sThetaMin.reset()
                self.sThetaMax.reset()
                # reset values for mask
                self.sectorObj.set_x(cfg.init_centre[0])
                self.sectorObj.set_y(cfg.init_centre[1])
                self.sectorObj.set_r(cfg.init_radius)
                self.sectorObj.tmin, self.sectorObj.tmax = np.deg2rad(
                    cfg.init_theta)
        elif self.segmType == 'ncut':
            self.sLabelNr.reset()
            # reset ncut labels
            self.ima_ncut_labels = np.copy(self.orig_ncut_labels)
            # reset values for volHistMask
            self.volHistMask = self.ima_ncut_labels[:, :, 0].reshape(
                (self.nrBins, self.nrBins))
            # reset counter field
            self.counterField = np.zeros((self.nrBins, self.nrBins))
            # reset political borders
            self.pltMap = np.zeros((self.nrBins, self.nrBins))
            self.pltMapH.set_data(self.pltMap)
        self.updateSliceNr()
        self.remapMsks()
        self.updatePanels(update_slice=False, update_rotation=True,
                          update_extent=False)

    def updateThetaMin(self, val):
        """Update theta (min) in volume histogram mask."""
        if self.segmType == 'main':
            theta_val = self.sThetaMin.val  # get theta value from slider
            self.sectorObj.theta_min(theta_val)
            self.remapMsks()
            self.updatePanels(update_slice=False, update_rotation=True,
                              update_extent=False)
        else:
            return

    def updateThetaMax(self, val):
        """Update theta(max) in volume histogram mask."""
        if self.segmType == 'main':
            theta_val = self.sThetaMax.val  # get theta value from slider
            self.sectorObj.theta_max(theta_val)
            self.remapMsks()
            self.updatePanels(update_slice=False, update_rotation=True,
                              update_extent=False)
        else:
            return

    def exportNyp(self, event):
        """Export histogram counts as a numpy array."""
        print(" Exporting numpy file...")
        outFileName = '{}_identifier_pcMax{}_pcMin{}_sc{}'.format(
            self.basename, cfg.perc_max, cfg.perc_min, int(cfg.scale))
        if self.segmType == 'ncut':
            outFileName = outFileName.replace('identifier', 'volHistLabels')
            out_data = self.volHistMask
        elif self.segmType == 'main':
            outFileName = outFileName.replace('identifier', 'volHist')
            out_data = self.counts
        # dots in percentile values would confuse the file extension
        outFileName = outFileName.replace('.', 'pt')
        np.save(outFileName, out_data)
        print(" Saved as: {}{}".format(outFileName, '.npy'))

    def updateLabels(self, val):
        """Update labels in volume histogram with slider."""
        if self.segmType == 'ncut':
            self.labelNr = self.sLabelNr.val
        else:  # NOTE: might be used in the future
            return

    def imaSlcMskIncr(self, incr):
        """Update transparency of image mask by increment."""
        # only apply while the result stays within [0, 1]
        if (self.TranspVal + incr >= 0) & (self.TranspVal + incr <= 1):
            self.TranspVal += incr
            self.imaSlcMskH.set_alpha(self.TranspVal)
            self.figure.canvas.draw()

    def imaSlcMskTransSwitch(self):
        """Update transparency of image mask to toggle transparency."""
        self.imaSlcMskSwitch = (self.imaSlcMskSwitch+1) % 2
        if self.imaSlcMskSwitch == 1:  # set imaSlcMsk transp
            self.imaSlcMskH.set_alpha(0)
        else:  # set imaSlcMsk opaque
            self.imaSlcMskH.set_alpha(self.TranspVal)
        self.figure.canvas.draw()

    def volHistHighlightTransSwitch(self):
        """Update transparency of highlights to toggle transparency."""
        self.volHistHighlightSwitch = (self.volHistHighlightSwitch+1) % 2
        if self.volHistHighlightSwitch == 1 and self.highlights[0]:
            if self.highlights[0]:
                {h.set_visible(False) for h in self.highlights[0]}
        elif self.volHistHighlightSwitch == 0 and self.highlights[0]:
            {h.set_visible(True) for h in self.highlights[0]}
        self.figure.canvas.draw()

    def updateLabelsRadio(self, val):
        """Update labels with radio buttons."""
        labelScale = self.lMax / 6.  # nr of non-zero radio buttons
        self.labelNr = int(float(val) * labelScale)

    def labelContours(self):
        """Plot political borders used in ncut version."""
        # non-zero gradient magnitude marks label boundaries
        grad = np.gradient(self.volHistMask)
        self.pltMap = np.greater(np.sqrt(np.power(grad[0], 2) +
                                         np.power(grad[1], 2)), 0)
        self.pltMapH.set_data(self.pltMap)
        self.pltMapH.set_extent((0, self.nrBins, self.nrBins, 0))

    def lassoArr(self, array, indices):
        """Update lasso volume histogram mask."""
        lin = np.arange(array.size)
        newArray = array.flatten()
        newArray[lin[indices]] = True
        return newArray.reshape(array.shape)

    def calcImaMaskBrd(self):
        """Calculate borders of image mask slice."""
        # border = mask minus its erosion
        return self.imaSlcMsk - binary_erosion(self.imaSlcMsk)
class sector_mask:
    """A pacman-like shape with useful parameters.

    Disclaimer
    ----------
    This script is adapted from a stackoverflow post by user ali_m:
    [1] http://stackoverflow.com/questions/18352973/mask-a-circular-sector-in-a-numpy-array

    """

    def __init__(self, shape, centre, radius, angle_range):
        """Initialize variables used across functions here.

        Parameters
        ----------
        shape : (rows, cols) of the 2D array to be masked
        centre : (cx, cy) centre of the circular sector
        radius : float, sector radius in array units
        angle_range : (start, stop) angles in degrees
        """
        self.radius, self.shape = radius, shape
        self.x, self.y = np.ogrid[:shape[0], :shape[1]]
        self.cx, self.cy = centre
        self.tmin, self.tmax = np.deg2rad(angle_range)
        # ensure stop angle > start angle
        if self.tmax < self.tmin:
            self.tmax += 2 * np.pi
        # convert cartesian to polar coordinates (squared radius, no sqrt)
        self.r2 = (self.x - self.cx) * (self.x - self.cx) + (
            self.y - self.cy) * (self.y - self.cy)
        self.theta = np.arctan2(self.x - self.cx, self.y - self.cy) - self.tmin
        # wrap angles between 0 and 2*pi
        self.theta %= 2 * np.pi

    def set_polCrd(self):
        """Convert cartesian to polar coordinates."""
        self.r2 = (self.x - self.cx) * (self.x - self.cx) + (
            self.y - self.cy) * (self.y - self.cy)
        self.theta = np.arctan2(self.x - self.cx, self.y - self.cy) - self.tmin
        # wrap angles between 0 and 2*pi
        self.theta %= (2 * np.pi)

    def set_x(self, x):
        """Set x axis value."""
        self.cx = x
        self.set_polCrd()  # update polar coordinates

    def set_y(self, y):
        """Set y axis value."""
        self.cy = y
        self.set_polCrd()  # update polar coordinates

    def set_r(self, radius):
        """Set radius of the circle."""
        self.radius = radius

    def scale_r(self, scale):
        """Scale (multiply) the radius."""
        self.radius = self.radius * scale

    def rotate(self, degree):
        """Rotate shape by the given number of degrees."""
        rad = np.deg2rad(degree)
        self.tmin += rad
        self.tmax += rad
        self.set_polCrd()  # update polar coordinates

    def theta_min(self, degree):
        """Angle to determine one the cut out piece in circular mask."""
        rad = np.deg2rad(degree)
        self.tmin = rad
        # ensure stop angle > start angle
        if self.tmax <= self.tmin:
            self.tmax += 2 * np.pi
        # ensure stop angle - 2*np.pi NOT > start angle
        if self.tmax - 2 * np.pi >= self.tmin:
            self.tmax -= 2 * np.pi
        # update polar coordinates
        self.set_polCrd()

    def theta_max(self, degree):
        """Angle to determine one the cut out piece in circular mask."""
        rad = np.deg2rad(degree)
        self.tmax = rad
        # ensure stop angle > start angle
        if self.tmax <= self.tmin:
            self.tmax += 2 * np.pi
        # ensure stop angle - 2*np.pi NOT > start angle
        if self.tmax - 2 * np.pi >= self.tmin:
            self.tmax -= 2 * np.pi
        # update polar coordinates
        self.set_polCrd()

    def binaryMask(self):
        """Return a boolean mask for a circular sector."""
        # circular mask
        self.circmask = self.r2 <= self.radius * self.radius
        # angular mask
        self.anglemask = self.theta <= (self.tmax - self.tmin)
        # return binary mask
        return self.circmask * self.anglemask

    def contains(self, event):
        """Check if a cursor pointer is inside the sector mask.

        Returns
        -------
        bool : True when the histogram bin under the cursor is masked.
        """
        # BUG FIX: indices must be ints (np.floor returns floats, which
        # are invalid numpy indices), and the old "is True" identity test
        # always failed for numpy bool scalars; compare by value instead.
        xbin = int(np.floor(event.xdata))
        ybin = int(np.floor(event.ydata))
        Mask = self.binaryMask()
        # switch x and y bin because volHistMask is not Cartesian
        return bool(Mask[ybin][xbin])

    def draw(self, ax, cmap='Reds', alpha=0.2, vmin=0.1, zorder=0,
             interpolation='nearest', origin='lower',
             extent=(0, 100, 0, 100)):
        """Draw sector mask on the given axes.

        Note: the extent default is a tuple (immutable) to avoid the
        shared-mutable-default-argument pitfall.
        """
        BinMask = self.binaryMask()
        FigObj = ax.imshow(BinMask, cmap=cmap, alpha=alpha, vmin=vmin,
                           interpolation=interpolation, origin=origin,
                           extent=extent, zorder=zorder)
        return (FigObj, BinMask)
| bsd-3-clause |
MikeDT/CNN_2_BBN | CNN_2_BBN_Optimiser.py | 1 | 5940 | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation,Flatten
from keras.models import Sequential
from keras.utils import np_utils
from Synthetic_Data_Creator import Synthetic_Data_Creator
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from sklearn.model_selection import train_test_split
from keras.constraints import maxnorm
from keras.optimizers import SGD, Adam
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from keras import regularizers
from keras.layers.advanced_activations import ELU
import keras
from XY_Container import Xy_Container_New
import numpy as np
def data():
    """
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.

    Returns
    -------
    x_train, y_train, x_test, y_test : numpy arrays
        Stacked per-graph CNN inputs and two-column [y, 1-y] targets,
        split 75/25 with a fixed random seed for reproducibility.
    """
    cnnMaxDim = 10
    XYCont = Xy_Container_New(cnnMaxDim=cnnMaxDim)
    # Generate 100 synthetic graphs for every dimensionality 3..cnnMaxDim.
    for j in range(3, cnnMaxDim + 1):
        for i in range(0, 100):
            scd = Synthetic_Data_Creator()
            df, G = scd.run(dimensions=j)
            XYCont.importPrepX(df=df, G=G, dfid=i)
    # FIX (idiom): the original copied the dict values element-by-element
    # into fresh lists before stacking; list(...) already materialises
    # them, so stack directly.
    X = np.vstack(list(XYCont.CNNX.values())) / 30  # crude normalisation
    y = np.vstack(list(XYCont.CNNy.values()))
    # Two-column complementary target: [y, 1 - y].
    y = np.hstack((y, np.abs(y - 1)))
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=99)
    print(x_train.shape)
    return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
    - loss: Specify a numeric evaluation metric to be minimized
    - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
    - model: specify the model just created so that we can later use it again.
    """
    # NOTE: the {{...}} expressions below are hyperas template placeholders,
    # not valid Python on their own; hyperas rewrites this function's source
    # before execution, substituting sampled hyper-parameter values.
    K.set_image_dim_ordering('th')  # channels-first ('th') image ordering
    # Hyper-parameter search space
    filter_hp = {{choice([16,34,32,36])}}
    l1_hp = {{uniform(0.01,0.1)}}
    kernelini_hp = {{choice(['random_uniform','random_normal'])}}
    dense_hp = {{choice([100,150,200,350,300])}}
    alpha_hp = {{uniform(0.1,1.0)}}
    dropout_1 = {{uniform(0.2, 0.8)}}
    dropout_2 = {{uniform(0.2, 0.8)}}
    dropout_3 = {{uniform(0.2, 0.8)}}
    kernel_hp = {{choice([(2,2),(3,3),(4,4),(5,5)])}}
    # Fixed network/input dimensions
    compressFactor = 30
    num_classes=2
    dimensions = 10
    model = Sequential()
    # Conv block 1 (defines the input shape)
    model.add(Conv2D(filters=filter_hp, # dimensionality of output
                     kernel_size=kernel_hp,
                     input_shape=(1, compressFactor, dimensions),
                     padding='same',
                     bias_regularizer=regularizers.l1(l1_hp),
                     kernel_initializer=kernelini_hp,
                     kernel_constraint=maxnorm(3)))
    model.add(keras.layers.advanced_activations.ELU(alpha=alpha_hp))
    model.add(Dropout(dropout_1))
    # Conv block 2, followed by pooling
    model.add(Conv2D(filters=filter_hp,
                     kernel_size=kernel_hp,
                     padding='same',
                     bias_regularizer=regularizers.l1(l1_hp),
                     kernel_initializer=kernelini_hp,
                     kernel_constraint=maxnorm(3)))
    model.add(keras.layers.advanced_activations.ELU(alpha=alpha_hp))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Conv block 3
    model.add(Conv2D(filters=filter_hp,
                     kernel_size=kernel_hp,
                     padding='same',
                     bias_regularizer=regularizers.l1(l1_hp),
                     kernel_initializer=kernelini_hp,
                     kernel_constraint=maxnorm(3)))
    model.add(keras.layers.advanced_activations.ELU(alpha=alpha_hp))
    model.add(Dropout(dropout_2))
    # Conv block 4, followed by pooling
    model.add(Conv2D(filters=filter_hp,
                     kernel_size=kernel_hp,
                     padding='same',
                     kernel_initializer=kernelini_hp,
                     bias_regularizer=regularizers.l1(l1_hp),
                     kernel_constraint=maxnorm(3)))
    model.add(keras.layers.advanced_activations.ELU(alpha=alpha_hp))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dense classification head
    model.add(Flatten())
    model.add(Dense(dense_hp,#512, 200 best so far
                    bias_regularizer=regularizers.l1(l1_hp),
                    kernel_initializer=kernelini_hp,
                    kernel_constraint=maxnorm(3)))
    model.add(keras.layers.advanced_activations.ELU(alpha=alpha_hp))
    model.add(Dropout(dropout_3))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer='adam')
    model.fit(x_train, y_train,
              batch_size=32,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))
    # hyperopt minimises 'loss'; here that is the test-set loss
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss, accuracy:', score, acc)
    return {'loss': score, 'status': STATUS_OK, 'model': model}
def run():
    """Run the hyperas/hyperopt search and report the best model.

    Performs up to 1000 TPE-guided evaluations of ``model`` over its
    search space, then regenerates the data split and evaluates the
    winning model on the held-out test set.
    """
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=1000,
                                          trials=Trials())
    # Recreate the split (same random_state in data()) for final evaluation.
    X_train, Y_train, X_test, Y_test = data()
    # Fixed typo in the user-facing message ("Evalutation") and removed a
    # stray dataset artifact that had been fused onto the final line.
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
MatthieuBizien/scikit-learn | examples/svm/plot_custom_kernel.py | 43 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
Y = iris.target  # integer class labels (the three iris species)
def my_kernel(X, Y):
    """Custom linear kernel that weights the first feature twice as much.

                 (2 0)
    k(X, Y) = X  (   ) Y.T
                 (0 1)
    """
    weights = np.array([[2, 0], [0, 1.0]])
    scaled = np.dot(X, weights)
    return np.dot(scaled, Y.T)
h = .02  # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict on every grid point (flattened to an (n_points, 2) array)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
gschivley/Index-variability | src/Analysis/capacity.py | 1 | 15174 | import pandas as pd
import os
import calendar
from joblib import Parallel, delayed
idx = pd.IndexSlice
def month_hours(year, month):
    """Return the total number of hours in the given calendar month."""
    # calendar.monthrange -> (weekday of first day, number of days in month)
    _, n_days = calendar.monthrange(year, month)
    return n_days * 24
def monthly_capacity_all(op, ret, years, nerc_plant_list, fuels,
                         months=range(1,13), cap_type='nameplate capacity (mw)',
                         n_jobs=-1, print_year=False,):
    """Operable capacity for every month across a range of years.

    Fans the per-year calculation (``monthly_capacity_year``) out over
    ``n_jobs`` parallel workers, one task per year, and concatenates the
    per-year frames into a single sorted dataframe.

    Parameters
    ----------
    op : DataFrame
        EIA-860m operable sheet; needs columns
        [op datetime, nerc, fuel category, nameplate capacity (mw)].
    ret : DataFrame
        EIA-860m retired sheet; needs columns
        [ret datetime, op datetime, nerc, fuel category,
        nameplate capacity (mw)].
    years : list
        Years to calculate capacity during.
    nerc_plant_list : dict
        Dict of dicts (year -> nerc -> list(plant id)).
    fuels : list
        Fuel categories.
    months : iterable
        Months to calculate (default: all 12).
    cap_type : str
        'nameplate capacity (mw)', 'net summer capacity (mw)', or
        'net winter capacity (mw)'.
    n_jobs : int
        Number of parallel workers.
    print_year : bool
        Print each year as it is processed.

    Returns
    -------
    DataFrame
        All capacity operable (including out of service and standby)
        during the requested years and months.
    """
    shared_kwargs = {
        'op': op,
        'ret': ret,
        'fuels': fuels,
        'months': months,
        'cap_type': cap_type,
        'print_year': print_year,
    }
    # One delayed task per year; each worker gets that year's plant lists.
    tasks = (delayed(monthly_capacity_year)(yr, nerc_plant_list[yr],
                                            **shared_kwargs)
             for yr in years)
    yearly_frames = Parallel(n_jobs=n_jobs)(tasks)
    combined = pd.concat(yearly_frames)
    combined.sort_index(inplace=True)
    return combined
def monthly_capacity_year(year, nerc_plants, op, ret, fuels,
                          months=range(1,13),
                          cap_type='nameplate capacity (mw)',
                          print_year=False):
    """
    Calculate the operable capacity for every month in a single year
    inputs:
        year (int): single year to calculate capacity during
        nerc_plants (dict): nerc regions for the keys with a list of plant ids
            for each value
        op (df): data from the EIA-860m operable sheet - must have columns
            [op datetime, nerc, fuel category, nameplate capacity (mw)]
        ret (df): data from the EIA-860m retired sheet - must have columns
            [ret datetime, op datetime, nerc, fuel category,
            nameplate capacity (mw)]
        fuels (list): fuel categories
        months (list): months to calculate - default is all months
        cap_type (str): options are 'nameplate capacity (mw)',
            'net summer capacity (mw)', or 'net winter capacity (mw)'
        print_year (bool): print each year during processing
    outputs:
        df: dataframe with all capacity that was operable (including out of
            service and standby) during the years and months specified,
            indexed by (nerc, fuel category, year, month)
    """
    if print_year:
        print(year)
    # create list of strings and convert to datetime
    date_strings = ['{}-{}-01'.format(year, month) for month in months]
    dt_list = pd.to_datetime(date_strings, yearfirst=True)
    # Make an empty dataframe to fill with capacity and possible generation
    nercs = nerc_plants.keys()
    # Add USA to the list of nerc regions for national calculations
    nercs_national = list(nercs) + ['USA']
    # Create a multiindex
    index = pd.MultiIndex.from_product([nercs_national, fuels, [year], months],
                                       names=['nerc', 'fuel category',
                                              'year', 'month'])
    # Create an empty dataframe
    op_df_capacity = pd.DataFrame(index=index, columns=['active capacity',
                                                        'possible gen',
                                                        'datetime'])
    op_df_capacity.sort_index(inplace=True)
    # add datetime column, which is dt_list repeated for every nerc and fuel
    op_df_capacity['datetime'] = (list(dt_list) * len(nercs_national)
                                  * len(fuels))
    for dt, month in zip(dt_list, months):
        hours_in_month = month_hours(year=year, month=month)
        # Initial slice of operating and retirement dataframes by datetime
        # Don't include units the month that they come online or retire
        op_month = op.loc[(op['op datetime'] < dt), :]
        ret_month = ret.loc[(ret['ret datetime'] > dt) &
                            (ret['op datetime'] < dt), :]
        for fuel in fuels:
            # Further slice the dataframes for just the fuel category
            op_fuel = op_month.loc[op_month['fuel category'] == fuel, :]
            ret_fuel = ret_month.loc[ret_month['fuel category'] == fuel, :]
            # National totals - in case not all plant ids show up in a nerc
            total_op = op_fuel.loc[:, cap_type].sum()
            total_ret = ret_fuel.loc[:, cap_type].sum()
            total_active = total_op + total_ret
            # Insert total USA capacity for the fuel and month into dataframe
            # (idx is the module-level pd.IndexSlice helper)
            op_df_capacity.loc[idx['USA', fuel, year, month],
                               'active capacity'] = total_active
            # Possible generation is active capacity multiplied by hours in
            # month
            op_df_capacity.loc[idx['USA', fuel, year, month],
                               'possible gen'] = hours_in_month * total_active
            # Loop through the dictionary, where each set of values is a list with
            # plant ids in a nerc
            for nerc, plant_ids in nerc_plants.items():
                # Capacity on operable sheet
                plants_op = (op_fuel.loc[op_fuel['plant id'].isin(plant_ids),
                                         cap_type]
                             .sum())
                # Capacity on retired sheet that was active for the given month
                plants_ret = (ret_fuel.loc[ret_fuel['plant id'].isin(plant_ids),
                                           cap_type]
                              .sum())
                # all active capacity from both sheets
                active_cap = plants_op + plants_ret
                # Add capacity from active and retired sheets to dataframe
                op_df_capacity.loc[idx[nerc, fuel, year, month],
                                   'active capacity'] = active_cap
                # Possible generation is active capacity multiplied by hours in
                # month
                op_df_capacity.loc[idx[nerc, fuel, year, month],
                                   'possible gen'] = hours_in_month * active_cap
    return op_df_capacity
def monthly_ng_type_all(op, ret, years, nerc_plant_list, fuels,
                        months=range(1,13), cap_type='nameplate capacity (mw)',
                        n_jobs=-1, print_year=False):
    """Natural gas capacity by prime mover type over a range of years.

    Dispatches ``monthly_ng_type_year`` across ``n_jobs`` parallel workers
    (one task per year), then concatenates the per-year results into one
    sorted dataframe with NGCC, turbine, and other capacity plus the
    fraction of total capacity in each category.

    Parameters
    ----------
    op : DataFrame
        EIA-860m operable sheet; needs columns
        [op datetime, nerc, fuel category, nameplate capacity (mw)].
    ret : DataFrame
        EIA-860m retired sheet; needs columns
        [ret datetime, op datetime, nerc, fuel category,
        nameplate capacity (mw)].
    years : list
        Years to calculate capacity during.
    nerc_plant_list : dict
        Dict of dicts (year -> nerc -> list(plant id)).
    fuels : list
        Fuel categories.
    months : iterable
        Months to calculate (default: all 12).
    cap_type : str
        'nameplate capacity (mw)', 'net summer capacity (mw)', or
        'net winter capacity (mw)'.
    n_jobs : int
        Number of parallel workers.
    print_year : bool
        Print each year as it is processed.

    Returns
    -------
    DataFrame
        Capacity and capacity fractions by prime mover category, indexed
        by (nerc, year, month).
    """
    shared_kwargs = {
        'op': op,
        'ret': ret,
        'fuels': fuels,
        'months': months,
        'cap_type': cap_type,
        'print_year': print_year,
    }
    tasks = (delayed(monthly_ng_type_year)(yr, nerc_plant_list[yr],
                                           **shared_kwargs)
             for yr in years)
    yearly_frames = Parallel(n_jobs=n_jobs)(tasks)
    combined = pd.concat(yearly_frames)
    combined.sort_index(inplace=True)
    return combined
def monthly_ng_type_year(year, nerc_plants, op, ret, fuels,
                         months=range(1,13),
                         cap_type='nameplate capacity (mw)',
                         print_year=False):
    """
    Calculate the operable natural gas capacity and prime mover type
    for every month in a single year
    inputs:
        year (int): single year to calculate capacity during
        nerc_plants (dict): nerc regions for the keys with a list of plant ids
            for each value
        op (df): data from the EIA-860m operable sheet - must have columns
            [op datetime, nerc, fuel category, nameplate capacity (mw)]
        ret (df): data from the EIA-860m retired sheet - must have columns
            [ret datetime, op datetime, nerc, fuel category,
            nameplate capacity (mw)]
        fuels (list): fuel categories
        months (list): months to calculate - default is all months
        cap_type (str): options are 'nameplate capacity (mw)',
            'net summer capacity (mw)', or 'net winter capacity (mw)'
        print_year (bool): print each year during processing
    outputs:
        df: dataframe indexed by (nerc, year, month) with NGCC, turbine,
            and other capacity plus each category's fraction of the total
    """
    if print_year:
        print(year)
    # create list of strings and convert to datetime
    date_strings = ['{}-{}-01'.format(year, month) for month in months]
    dt_list = pd.to_datetime(date_strings, yearfirst=True)
    # Make an empty dataframe to fill with capacity and possible generation
    nercs = nerc_plants.keys()
    # Add USA to the list of nerc regions for national calculations
    nercs_national = list(nercs) + ['USA']
    # Create a multiindex
    index = pd.MultiIndex.from_product([nercs_national, [year], months],
                                       names=['nerc', 'year', 'month'])
    # Create an empty dataframe ('datetime' is appended below)
    op_ng_type = pd.DataFrame(index=index,
                              columns=['ngcc', 'turbine', 'other', 'total',
                                       'ngcc fraction', 'turbine fraction',
                                       'other fraction'])
    op_ng_type.sort_index(inplace=True)
    # add datetime column, which is dt_list repeated for every nerc region
    op_ng_type['datetime'] = (list(dt_list) * len(nercs_national))
    # Lists of prime mover codes for each category
    ngcc_pm = ['CA', 'CS', 'CT']
    turbine_pm = ['GT']
    other_pm = ['IC', 'ST']
    for dt, month in zip(dt_list, months):
        # Split out generator types into separate dataframes for given month.
        # Units are excluded the month they come online ('op datetime' < dt)
        # or retire ('ret datetime' > dt).
        op_ngcc = op.loc[(op['fuel category'] == 'Natural Gas') &
                         (op['prime mover code'].isin(ngcc_pm)) &
                         (op['op datetime'] < dt), :]
        op_turbine = op.loc[(op['fuel category'] == 'Natural Gas') &
                            (op['prime mover code'].isin(turbine_pm)) &
                            (op['op datetime'] < dt), :]
        op_other = op.loc[(op['fuel category'] == 'Natural Gas') &
                          (op['prime mover code'].isin(other_pm)) &
                          (op['op datetime'] < dt), :]
        ret_ngcc = ret.loc[(ret['fuel category'] == 'Natural Gas') &
                           (ret['prime mover code'].isin(ngcc_pm)) &
                           (ret['ret datetime'] > dt) &
                           (ret['op datetime'] < dt), :]
        ret_turbine = ret.loc[(ret['fuel category'] == 'Natural Gas') &
                              (ret['prime mover code'].isin(turbine_pm)) &
                              (ret['ret datetime'] > dt) &
                              (ret['op datetime'] < dt), :]
        ret_other = ret.loc[(ret['fuel category'] == 'Natural Gas') &
                            (ret['prime mover code'].isin(other_pm)) &
                            (ret['ret datetime'] > dt) &
                            (ret['op datetime'] < dt), :]
        # National level statistics
        ngcc_total = (op_ngcc.loc[:, cap_type].sum()
                      + ret_ngcc.loc[:, cap_type].sum())
        turbine_total = (op_turbine.loc[:, cap_type].sum()
                         + ret_turbine.loc[:, cap_type].sum())
        other_total = (op_other.loc[:, cap_type].sum()
                       + ret_other.loc[:, cap_type].sum())
        total = sum_ng_cap(ngcc_total, turbine_total, other_total)
        # idx is the module-level pd.IndexSlice helper
        op_ng_type.loc[idx['USA', year, month], 'total'] = total
        op_ng_type.loc[idx['USA', year, month], 'ngcc'] = ngcc_total
        op_ng_type.loc[idx['USA', year, month], 'turbine'] = turbine_total
        op_ng_type.loc[idx['USA', year, month], 'other'] = other_total
        # For each nerc region
        for nerc, plant_ids in nerc_plants.items():
            ngcc = ng_nerc_type(op=op_ngcc, ret=ret_ngcc,
                                plant_list=plant_ids, cap_type=cap_type)
            turbine = ng_nerc_type(op=op_turbine, ret=ret_turbine,
                                   plant_list=plant_ids, cap_type=cap_type)
            other = ng_nerc_type(op=op_other, ret=ret_other,
                                 plant_list=plant_ids, cap_type=cap_type)
            total = sum_ng_cap(ngcc, turbine, other)
            op_ng_type.loc[idx[nerc, year, month], 'total'] = total
            op_ng_type.loc[idx[nerc, year, month], 'ngcc'] = ngcc
            op_ng_type.loc[idx[nerc, year, month], 'turbine'] = turbine
            op_ng_type.loc[idx[nerc, year, month], 'other'] = other
    # Calculate fraction of capacity by prime mover type
    op_ng_type['ngcc fraction'] = op_ng_type['ngcc'] / op_ng_type['total']
    op_ng_type['turbine fraction'] = op_ng_type['turbine'] / op_ng_type['total']
    op_ng_type['other fraction'] = op_ng_type['other'] / op_ng_type['total']
    # Regions/months with zero total capacity produce NaN fractions; zero them
    op_ng_type.fillna(0, inplace=True)
    return op_ng_type
######
# A couple of helper functions for the NG calculations
def sum_ng_cap(ngcc, turbine, other):
    """Total natural gas capacity across the three prime mover groups."""
    return ngcc + turbine + other
def ng_nerc_type(op, ret, plant_list, cap_type):
    """Summed capacity (operable plus retired sheets) for the given plants.

    Restricts each sheet to rows whose 'plant id' is in ``plant_list`` and
    totals the ``cap_type`` column.
    """
    def _region_capacity(frame):
        in_region = frame['plant id'].isin(plant_list)
        return frame.loc[in_region, cap_type].sum()

    return _region_capacity(op) + _region_capacity(ret)
| bsd-3-clause |
dingocuster/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
# Fractions of the data held out for testing (so train fraction = 1 - value)
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
# Number of random train/test splits averaged per held-out fraction
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
    ("SGD", SGDClassifier()),
    ("ASGD", SGDClassifier(average=True)),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
    ("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
# x axis: proportion of data used for training
xx = 1. - np.array(heldout)
for name, clf in classifiers:
    print("training %s" % name)
    rng = np.random.RandomState(42)
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            X_train, X_test, y_train, y_test = \
                train_test_split(X, y, test_size=i, random_state=rng)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            # test error rate for this split
            yy_.append(1 - np.mean(y_pred == y_test))
        # average error over all rounds for this held-out fraction
        yy.append(np.mean(yy_))
    plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
ajdawson/windspharm | examples/cdms/rws_example.py | 1 | 1972 | """Compute Rossby wave source from the long-term mean flow.
This example uses the cdms interface.
Additional requirements for this example:
* cdms2 (http://uvcdat.llnl.gov/)
* matplotlib (http://matplotlib.org/)
* cartopy (http://scitools.org.uk/cartopy/)
"""
import cartopy.crs as ccrs
import cdms2
import matplotlib as mpl
import matplotlib.pyplot as plt
from windspharm.cdms import VectorWind
from windspharm.examples import example_data_path
mpl.rcParams['mathtext.default'] = 'regular'
# Read zonal and meridional wind components from file using the cdms2 module
# from CDAT. The components are in separate files.
ncu = cdms2.open(example_data_path('uwnd_mean.nc'), 'r')
uwnd = ncu('uwnd')
ncu.close()
ncv = cdms2.open(example_data_path('vwnd_mean.nc'), 'r')
vwnd = ncv('vwnd')
ncv.close()
# Create a VectorWind instance to handle the computations.
w = VectorWind(uwnd, vwnd)
# Compute components of rossby wave source: absolute vorticity, divergence,
# irrotational (divergent) wind components, gradients of absolute vorticity.
eta = w.absolutevorticity()
div = w.divergence()
uchi, vchi = w.irrotationalcomponent()
etax, etay = w.gradient(eta)
# Combine the components to form the Rossby wave source term:
# S = -eta * D - (u_chi * d(eta)/dx + v_chi * d(eta)/dy)
S = -eta * div - (uchi * etax + vchi * etay)
# Pick out the field for December and add a cyclic point (the cyclic point is
# for plotting purposes).
S_dec = S(time=slice(11, 12), longitude=(0, 360), squeeze=True)
# Plot Rossby wave source (values scaled by 1e11 to match the contour levels).
lons, lats = S_dec.getLongitude()[:], S_dec.getLatitude()[:]
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
clevs = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30]
fill = ax.contourf(lons, lats, S_dec.asma() * 1e11, clevs,
                   transform=ccrs.PlateCarree(), cmap=plt.cm.RdBu_r,
                   extend='both')
ax.coastlines()
ax.gridlines()
plt.colorbar(fill, orientation='horizontal')
plt.title('Rossby Wave Source ($10^{-11}$s$^{-1}$)', fontsize=16)
plt.show()
| mit |
wmvanvliet/mne-python | mne/utils/mixin.py | 4 | 19575 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from collections import OrderedDict
from copy import deepcopy
import logging
import json
import numpy as np
from .check import _check_pandas_installed, _check_preload, _validate_type
from ._logging import warn, verbose
from .numerics import object_size, object_hash
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
class SizeMixin(object):
    """Mixin adding size estimation, hashing, and equality to MNE objects."""

    def __eq__(self, other):
        """Compare self to other.

        Parameters
        ----------
        other : object
            The object to compare to.

        Returns
        -------
        eq : bool
            True if the two objects are equal.
        """
        if not isinstance(other, type(self)):
            return False
        return hash(self) == hash(other)

    @property
    def _size(self):
        """Estimate the object size (info plus any data array), in bytes."""
        try:
            total = object_size(self.info)
        except Exception:
            warn('Could not get size for self.info')
            return -1
        # Only one of 'data' / '_data' is counted, preferring 'data'.
        for attr in ('data', '_data'):
            if hasattr(self, attr):
                total += object_size(getattr(self, attr))
                break
        return total

    def __hash__(self):
        """Hash the object.

        Returns
        -------
        hash : int
            The hash
        """
        from ..evoked import Evoked
        from ..epochs import BaseEpochs
        from ..io.base import BaseRaw
        if isinstance(self, Evoked):
            payload = dict(info=self.info, data=self.data)
        elif isinstance(self, (BaseEpochs, BaseRaw)):
            # Raw/Epochs data must be loaded into memory before hashing.
            _check_preload(self, "Hashing ")
            payload = dict(info=self.info, data=self._data)
        else:
            raise RuntimeError('Hashing unknown object type: %s' % type(self))
        return object_hash(payload)
class GetEpochsMixin(object):
    """Class to add epoch selection and metadata to certain classes."""
    def __getitem__(self, item):
        """Return an Epochs object with a copied subset of epochs.
        Parameters
        ----------
        item : slice, array-like, str, or list
            See below for use cases.
        Returns
        -------
        epochs : instance of Epochs
            See below for use cases.
        Notes
        -----
        Epochs can be accessed as ``epochs[...]`` in several ways:
        1. **Integer or slice:** ``epochs[idx]`` will return an `~mne.Epochs`
           object with a subset of epochs chosen by index (supports single
           index and Python-style slicing).
        2. **String:** ``epochs['name']`` will return an `~mne.Epochs` object
           comprising only the epochs labeled ``'name'`` (i.e., epochs created
           around events with the label ``'name'``).
           If there are no epochs labeled ``'name'`` but there are epochs
           labeled with /-separated tags (e.g. ``'name/left'``,
           ``'name/right'``), then ``epochs['name']`` will select the epochs
           with labels that contain that tag (e.g., ``epochs['left']`` selects
           epochs labeled ``'audio/left'`` and ``'visual/left'``, but not
           ``'audio_left'``).
           If multiple tags are provided *as a single string* (e.g.,
           ``epochs['name_1/name_2']``), this selects epochs containing *all*
           provided tags. For example, ``epochs['audio/left']`` selects
           ``'audio/left'`` and ``'audio/quiet/left'``, but not
           ``'audio/right'``. Note that tag-based selection is insensitive to
           order: tags like ``'audio/left'`` and ``'left/audio'`` will be
           treated the same way when selecting via tag.
        3. **List of strings:** ``epochs[['name_1', 'name_2', ... ]]`` will
           return an `~mne.Epochs` object comprising epochs that match *any* of
           the provided names (i.e., the list of names is treated as an
           inclusive-or condition). If *none* of the provided names match any
           epoch labels, a ``KeyError`` will be raised.
           If epoch labels are /-separated tags, then providing multiple tags
           *as separate list entries* will likewise act as an inclusive-or
           filter. For example, ``epochs[['audio', 'left']]`` would select
           ``'audio/left'``, ``'audio/right'``, and ``'visual/left'``, but not
           ``'visual/right'``.
        4. **Pandas query:** ``epochs['pandas query']`` will return an
           `~mne.Epochs` object with a subset of epochs (and matching
           metadata) selected by the query called with
           ``self.metadata.eval``, e.g.::
               epochs["col_a > 2 and col_b == 'foo'"]
           would return all epochs whose associated ``col_a`` metadata was
           greater than two, and whose ``col_b`` metadata was the string 'foo'.
           Query-based indexing only works if Pandas is installed and
           ``self.metadata`` is a :class:`pandas.DataFrame`.
           .. versionadded:: 0.16
        """
        return self._getitem(item)
    def _item_to_select(self, item):
        """Normalize an indexing item into indices, a slice, or an array."""
        if isinstance(item, str):
            item = [item]
        # Convert string to indices
        if isinstance(item, (list, tuple)) and len(item) > 0 and \
                isinstance(item[0], str):
            select = self._keys_to_idx(item)
        elif isinstance(item, slice):
            select = item
        else:
            select = np.atleast_1d(item)
            if len(select) == 0:
                select = np.array([], int)
        return select
    def _getitem(self, item, reason='IGNORED', copy=True, drop_event_id=True,
                 select_data=True, return_indices=False):
        """
        Select epochs from current object.
        Parameters
        ----------
        item: slice, array-like, str, or list
            see `__getitem__` for details.
        reason: str
            entry in `drop_log` for unselected epochs
        copy: bool
            return a copy of the current object
        drop_event_id: bool
            remove non-existing event-ids after selection
        select_data: bool
            apply selection to data
            (use `select_data=False` if subclasses do not have a
            valid `_data` field, or data has already been subselected)
        return_indices: bool
            return the indices of selected epochs from the original object
            in addition to the new `Epochs` objects
        Returns
        -------
        `Epochs` or tuple(Epochs, np.ndarray) if `return_indices` is True
            subset of epochs (and optionally array with kept epoch indices)
        """
        # Temporarily detach the (possibly huge) data array so that .copy()
        # does not duplicate it; it is re-attached to both objects below.
        data = self._data
        del self._data
        inst = self.copy() if copy else self
        self._data = inst._data = data
        del self
        select = inst._item_to_select(item)
        has_selection = hasattr(inst, 'selection')
        if has_selection:
            key_selection = inst.selection[select]
            drop_log = list(inst.drop_log)
            if reason is not None:
                # mark every de-selected epoch with the given drop reason
                for k in np.setdiff1d(inst.selection, key_selection):
                    drop_log[k] = (reason,)
            inst.drop_log = tuple(drop_log)
            inst.selection = key_selection
            del drop_log
        inst.events = np.atleast_2d(inst.events[select])
        if inst.metadata is not None:
            pd = _check_pandas_installed(strict=False)
            if pd:
                metadata = inst.metadata.iloc[select]
                if has_selection:
                    metadata.index = inst.selection
            else:
                metadata = np.array(inst.metadata, 'object')[select].tolist()
            # will reset the index for us
            GetEpochsMixin.metadata.fset(inst, metadata, verbose=False)
        if inst.preload and select_data:
            # ensure that each Epochs instance owns its own data so we can
            # resize later if necessary
            inst._data = np.require(inst._data[select], requirements=['O'])
        if drop_event_id:
            # update event id to reflect new content of inst
            inst.event_id = {k: v for k, v in inst.event_id.items()
                             if v in inst.events[:, 2]}
        if return_indices:
            return inst, select
        else:
            return inst
    def _keys_to_idx(self, keys):
        """Find entries in event dict."""
        keys = keys if isinstance(keys, (list, tuple)) else [keys]
        try:
            # Assume it's a condition name
            return np.where(np.any(
                np.array([self.events[:, 2] == self.event_id[k]
                          for k in _hid_match(self.event_id, keys)]),
                axis=0))[0]
        except KeyError as err:
            # Could we in principle use metadata with these Epochs and keys?
            if (len(keys) != 1 or self.metadata is None):
                # If not, raise original error
                raise
            msg = str(err.args[0]) # message for KeyError
            pd = _check_pandas_installed(strict=False)
            # See if the query can be done
            if pd:
                md = self.metadata if hasattr(self, '_metadata') else None
                self._check_metadata(metadata=md)
                try:
                    # Try metadata
                    mask = self.metadata.eval(keys[0], engine='python').values
                except Exception as exp:
                    msg += (' The epochs.metadata Pandas query did not '
                            'yield any results: %s' % (exp.args[0],))
                else:
                    return np.where(mask)[0]
            else:
                # If not, warn this might be a problem
                msg += (' The epochs.metadata Pandas query could not '
                        'be performed, consider installing Pandas.')
            raise KeyError(msg)
    def __len__(self):
        """Return the number of epochs.
        Returns
        -------
        n_epochs : int
            The number of remaining epochs.
        Notes
        -----
        This function only works if bad epochs have been dropped.
        Examples
        --------
        This can be used as::
            >>> epochs.drop_bad()  # doctest: +SKIP
            >>> len(epochs)  # doctest: +SKIP
            43
            >>> len(epochs.events)  # doctest: +SKIP
            43
        """
        from ..epochs import BaseEpochs
        if isinstance(self, BaseEpochs) and not self._bad_dropped:
            raise RuntimeError('Since bad epochs have not been dropped, the '
                               'length of the Epochs is not known. Load the '
                               'Epochs with preload=True, or call '
                               'Epochs.drop_bad(). To find the number '
                               'of events in the Epochs, use '
                               'len(Epochs.events).')
        return len(self.events)
    def __iter__(self):
        """Facilitate iteration over epochs.
        This method resets the object iteration state to the first epoch.
        Notes
        -----
        This enables the use of this Python pattern::
            >>> for epoch in epochs:  # doctest: +SKIP
            >>>     print(epoch)  # doctest: +SKIP
        Where ``epoch`` is given by successive outputs of
        :meth:`mne.Epochs.next`.
        """
        self._current = 0
        self._current_detrend_picks = self._detrend_picks
        return self
    def __next__(self, return_event_id=False):
        """Iterate over epoch data.
        Parameters
        ----------
        return_event_id : bool
            If True, return both the epoch data and an event_id.
        Returns
        -------
        epoch : array of shape (n_channels, n_times)
            The epoch data.
        event_id : int
            The event id. Only returned if ``return_event_id`` is ``True``.
        """
        if self.preload:
            if self._current >= len(self._data):
                self._stop_iter()
            epoch = self._data[self._current]
            self._current += 1
        else:
            # Data not preloaded: read epochs from raw one at a time,
            # skipping epochs that fail the goodness (rejection) criteria.
            is_good = False
            while not is_good:
                if self._current >= len(self.events):
                    self._stop_iter()
                epoch_noproj = self._get_epoch_from_raw(self._current)
                epoch_noproj = self._detrend_offset_decim(
                    epoch_noproj, self._current_detrend_picks)
                epoch = self._project_epoch(epoch_noproj)
                self._current += 1
                is_good, _ = self._is_good_epoch(epoch)
            # If delayed-ssp mode, pass 'virgin' data after rejection decision.
            if self._do_delayed_proj:
                epoch = epoch_noproj
        if not return_event_id:
            return epoch
        else:
            return epoch, self.events[self._current - 1][-1]
    def _stop_iter(self):
        """Clean up iteration state and signal the end of iteration."""
        del self._current
        del self._current_detrend_picks
        raise StopIteration # signal the end
    next = __next__ # originally for Python2, now b/c public
    def _check_metadata(self, metadata=None, reset_index=False):
        """Check metadata consistency."""
        # reset_index=False will not copy!
        if metadata is None:
            return
        else:
            pd = _check_pandas_installed(strict=False)
            if pd:
                _validate_type(metadata, types=pd.DataFrame,
                               item_name='metadata')
                if len(metadata) != len(self.events):
                    raise ValueError('metadata must have the same number of '
                                     'rows (%d) as events (%d)'
                                     % (len(metadata), len(self.events)))
                if reset_index:
                    if hasattr(self, 'selection'):
                        # makes a copy
                        metadata = metadata.reset_index(drop=True)
                        metadata.index = self.selection
                    else:
                        metadata = deepcopy(metadata)
            else:
                # without pandas, metadata is a list of per-epoch entries
                _validate_type(metadata, types=list,
                               item_name='metadata')
                if reset_index:
                    metadata = deepcopy(metadata)
        return metadata
    @property
    def metadata(self):
        """Get the metadata."""
        return self._metadata
    @metadata.setter
    @verbose
    def metadata(self, metadata, verbose=None):
        metadata = self._check_metadata(metadata, reset_index=True)
        if metadata is not None:
            if _check_pandas_installed(strict=False):
                n_col = metadata.shape[1]
            else:
                n_col = len(metadata[0])
            n_col = ' with %d columns' % n_col
        else:
            n_col = ''
        # log whether metadata is being added, replaced, or removed
        if hasattr(self, '_metadata') and self._metadata is not None:
            action = 'Removing' if metadata is None else 'Replacing'
            action += ' existing'
        else:
            action = 'Not setting' if metadata is None else 'Adding'
        logger.info('%s metadata%s' % (action, n_col))
        self._metadata = metadata
def _prepare_write_metadata(metadata):
    """Serialize metadata to a JSON string for saving (None passes through)."""
    if metadata is None:
        return None
    if isinstance(metadata, list):
        serialized = json.dumps(metadata)
    else:  # pandas DataFrame
        serialized = metadata.to_json(orient='records')
    assert isinstance(serialized, str)
    return serialized
def _prepare_read_metadata(metadata):
    """Convert saved metadata back from JSON.

    Returns a pandas DataFrame when pandas is available, otherwise the
    decoded list of OrderedDict rows; None passes through.
    """
    if metadata is not None:
        pd = _check_pandas_installed(strict=False)
        # use json.loads because this preserves ordering
        # (which is necessary for round-trip equivalence)
        metadata = json.loads(metadata, object_pairs_hook=OrderedDict)
        assert isinstance(metadata, list)
        if pd:
            metadata = pd.DataFrame.from_records(metadata)
            assert isinstance(metadata, pd.DataFrame)
    return metadata
def _hid_match(event_id, keys):
"""Match event IDs using HID selection.
Parameters
----------
event_id : dict
The event ID dictionary.
keys : list | str
The event ID or subset (for HID), or list of such items.
Returns
-------
use_keys : list
The full keys that fit the selection criteria.
"""
# form the hierarchical event ID mapping
use_keys = []
for key in keys:
if not isinstance(key, str):
raise KeyError('keys must be strings, got %s (%s)'
% (type(key), key))
use_keys.extend(k for k in event_id.keys()
if set(key.split('/')).issubset(k.split('/')))
if len(use_keys) == 0:
raise KeyError('Event "{}" is not in Epochs. Event_ids must be one of '
'"{}"'.format(key, ', '.join(event_id.keys())))
use_keys = list(set(use_keys)) # deduplicate if necessary
return use_keys
class _FakeNoPandas(object):  # noqa: D101
    # Context manager (for tests): monkey-patches _check_pandas_installed in
    # mne.epochs and mne.utils.mixin so code behaves as if pandas were not
    # installed; the original check is restored on exit.
    def __enter__(self):  # noqa: D105
        def _check(strict=True):
            # mimic the real check's API: raise when strict, else return False
            if strict:
                raise RuntimeError('Pandas not installed')
            else:
                return False
        import mne
        self._old_check = _check_pandas_installed
        mne.epochs._check_pandas_installed = _check
        mne.utils.mixin._check_pandas_installed = _check
    def __exit__(self, *args):  # noqa: D105
        import mne
        mne.epochs._check_pandas_installed = self._old_check
        mne.utils.mixin._check_pandas_installed = self._old_check
class ShiftTimeMixin(object):
    """Class for shift_time method (Epochs, Evoked, and DipoleFixed)."""
    def shift_time(self, tshift, relative=True):
        """Shift time scale in epoched or evoked data.
        Parameters
        ----------
        tshift : float
            The (absolute or relative) time shift in seconds. If ``relative``
            is True, positive tshift increases the time value associated with
            each sample, while negative tshift decreases it.
        relative : bool
            If True, increase or decrease time values by ``tshift`` seconds.
            Otherwise, shift the time values such that the time of the first
            sample equals ``tshift``.
        Returns
        -------
        epochs : instance of Epochs
            The modified Epochs instance.
        Notes
        -----
        This method allows you to shift the *time* values associated with each
        data sample by an arbitrary amount. It does *not* resample the signal
        or change the *data* values in any way.
        """
        from ..epochs import BaseEpochs
        _check_preload(self, 'shift_time')
        # relative: new start = old start + tshift; absolute: start at tshift
        start = tshift + (self.times[0] if relative else 0.)
        # rebuild the time axis at the (unchanged) sampling frequency
        new_times = start + np.arange(len(self.times)) / self.info['sfreq']
        if isinstance(self, BaseEpochs):
            self._set_times(new_times)
        else:
            self.times = new_times
        self._update_first_last()
        return self
    def _update_first_last(self):
        """Update self.first and self.last (sample indices)."""
        # first/last are sample indices of the (possibly shifted) time axis
        self.first = int(round(self.times[0] * self.info['sfreq']))
        self.last = len(self.times) + self.first - 1
| bsd-3-clause |
shurain/codesprint2013 | Basemodel.py | 1 | 10200 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import numpy as np
import pandas as pd
from IPython.core.display import HTML
import matplotlib as mtp
from pylab import *
from datetime import datetime, timedelta
from StringIO import StringIO
# <codecell>
parse = lambda x: datetime.strptime(x, '%Y%m%d %H%M')
# <codecell>
april = pd.read_csv('data/round2-4.csv', names=['date', 'time', 'direction', 'index', 'source', 'destination', 'distance', 'speed'], parse_dates=[[0, 1]], date_parser=parse, header=None)
may = pd.read_csv('data/round2-5.csv', names=['date', 'time', 'direction', 'index', 'source', 'destination', 'distance', 'speed'], parse_dates=[[0, 1]], date_parser=parse, header=None)
june = pd.read_csv('data/round2-6.csv', names=['date', 'time', 'direction', 'index', 'source', 'destination', 'distance', 'speed'], parse_dates=[[0, 1]], date_parser=parse, header=None)
# <markdowncell>
# 간단한 검증을 거쳐서 모델을 선택하기로 한다. 4, 5월을 모델 학습을 위한 데이터로 삼고 6월을 이를 검증하는 데이터로 삼는다.
# <codecell>
train = pd.concat([april, may])
test = pd.concat([june])
train = train.sort(['direction', 'index', 'date_time'])
test = test.sort(['direction', 'index', 'date_time'])
# <markdowncell>
# Data analysis에서 평일과 주말을 분리하여 보기로 하였는데, 검증을 해보자. 일단 전체 (평일과 주말) 데이터를 사용하여 median을 구해보자.
# <codecell>
whole_week = train.copy()
whole_week['time'] = whole_week.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
group = whole_week.groupby(['direction', 'index', 'time'])
df = group.median()
median_model = df.reset_index()
# <codecell>
print median_model
display(HTML(median_model[:10].to_html()))
# <markdowncell>
# Test를 어떻게 하느냐도 문제가 되지만 일단 화요일에 대한 검증만 해보도록 하자. 2013년 6월의 화요일은 6/4, 6/11, 6/18, 6/25일이다.
# <codecell>
def test_june(prediction, dow='tue'):
    """Return per-day MAE of `prediction` against June data for weekday `dow`."""
    # Weekday index i maps to days i+1, i+8, ... of the month, i.e. the list
    # implies June 2013 starts on a Saturday -- NOTE(review): verify calendar.
    week = ['sat', 'sun', 'mon', 'tue', 'wed', 'thu', 'fri']
    i = week.index(dow.lower())
    testing_days = range(i+1, 31, 7)
    result = []
    for k in testing_days:
        test_data = june.copy()
        test_data['day'] = test_data.date_time.apply(lambda x: int(x.day))
        test_data['time'] = test_data.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
        test_data = test_data[test_data['day'] == k]
        # expect full coverage: 2 directions x 126 segments x 288 5-min slots
        assert(len(test_data) == 2*126*288)
        # align prediction and observation row order before elementwise MAE
        test_data = test_data.sort(['direction', 'index', 'time'])
        prediction = prediction.sort(['direction', 'index', 'time'])
        result.append(np.mean(np.abs(prediction.speed.values - test_data.speed.values)))
    return result
# <codecell>
median_res = test_june(median_model, 'tue')
print np.mean(median_res)
print median_res
# <markdowncell>
# 주중의 데이터만 활용한 모델을 만들어보자.
# <codecell>
weekdays = train.copy()
weekdays['weekday'] = weekdays['date_time'].apply(lambda x: x.weekday())
weekdays = weekdays[weekdays['weekday'] < 5]
del weekdays['weekday']
weekdays['time'] = weekdays.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
group = weekdays.groupby(['direction', 'index', 'time'])
df = group.median()
weekday_median_model = df.reset_index()
# <codecell>
weekday_median_res = test_june(weekday_median_model, 'tue')
print np.mean(weekday_median_res)
print weekday_median_res
# <markdowncell>
# 일단 화요일에 대해서는 주중 데이터만 활용하는 것이 더 좋다. 다만 데이터를 자세히 살펴보면 2:2의 결과이며 하루는 값이 좀 튀는 경향이 있다. 일단 데이터 포인트가 4개 밖에 안 되기 때문에 통계적으로 안정적인 결과라 할 수는 없다.
# <codecell>
for i in range(7):
days = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
print days[i]
res1 = test_june(median_model, days[i])
res2 = test_june(weekday_median_model, days[i])
print np.mean(res1), np.mean(res2)
# <markdowncell>
# 주말의 결과는 전체 데이터를 사용한 것이 월등하다. 평일에는 조금 갈리는 경향을 보인다. 월, 목, 금에는 전체 데이터를 사용한 편이 좋고 화, 수에는 평일 데이터만 활용하는 것이 좋다.
# <markdowncell>
# 조금 더 나은 분석을 위해 일종의 cross validation을 해보자.
# <codecell>
whole_data = pd.concat([april, may, june])
# <codecell>
whole_data['date'] = whole_data.date_time.apply(lambda x: x.date())
whole_data['time'] = whole_data.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
whole_data['weekday'] = whole_data['date_time'].apply(lambda x: x.weekday())
whole_data = whole_data.sort(['date', 'direction', 'index', 'time'])
# <codecell>
import random
def crossvalidate():
    """Split `whole_data` into a random 81-day train / 10-day test partition.

    Relies on `whole_data` being sorted by date with each day occupying
    exactly 2 * 126 * 288 consecutive rows.
    """
    # 91 days
    days = range(91)
    random.shuffle(days)
    STRIDE = 2 * 126 * 288  # rows per day: directions x segments x 5-min slots
    test_range = days[:10]
    train_range = days[10:]
    train_data = []
    for x in train_range:
        train_data.append(whole_data[x * STRIDE:(x + 1) * STRIDE])
    test_data = []
    for x in test_range:
        test_data.append(whole_data[x * STRIDE:(x + 1) * STRIDE])
    cv_train = pd.concat(train_data)
    cv_test = pd.concat(test_data)
    return cv_train, cv_test
# <markdowncell>
# Crossvalidate 함수는 말 그대로 k-fold cross validation을 하기 위한 함수이다. 데이터를 10:81으로 나누도록 하드코딩 되어 있으니 9-fold CV라 할 수 있겠다. 이런식으로 사용하기 위해 몇 가지 가정이 뒷받침되어야 하지만 이는 된다고 가정하고 분석을 해보자.
# <codecell>
def test_cv(prediction, test_data, dow='tue'):
    """Return per-day MAE of `prediction` on `test_data` rows for weekday `dow`."""
    # datetime.weekday(): Monday == 0, so this list is index-aligned with it.
    week = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
    i = week.index(dow.lower())
    test_data = test_data[test_data['weekday'] == i]
    STRIDE = 2 * 126 * 288  # rows per full day
    stepsize = len(test_data) / STRIDE  # Python 2 integer division: whole days
    result = []
    for k in range(stepsize):
        temp_data = test_data[k * STRIDE:(k + 1) * STRIDE]
        # align prediction and observation row order before elementwise MAE
        temp_data = temp_data.sort(['direction', 'index', 'time'])
        prediction = prediction.sort(['direction', 'index', 'time'])
        result.append(np.mean(np.abs(prediction.speed.values - temp_data.speed.values)))
    return result
# <codecell>
for x in range(10):
train, test = crossvalidate()
group = train.groupby(['direction', 'index', 'time'])
df = group.median()
cv_median_model = df.reset_index()
weekdays = train[train['weekday'] < 5]
group = weekdays.groupby(['direction', 'index', 'time'])
df = group.median()
cv_weekday_median_model = df.reset_index()
cv_median_model_res = test_cv(cv_median_model, test, 'tue')
cv_weekday_median_model_res = test_cv(cv_weekday_median_model, test, 'tue')
print np.mean(cv_median_model_res), np.mean(cv_weekday_median_model_res)
print np.mean(cv_median_model_res) - np.mean(cv_weekday_median_model_res)
# <markdowncell>
# 화요일 기준으로는 평일 데이터를 사용한 것이 거의 항상 우월하다.
# <codecell>
for y in ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']:
print y
result = []
for x in range(10):
train, test = crossvalidate()
group = train.groupby(['direction', 'index', 'time'])
df = group.median()
cv_median_model = df.reset_index()
weekdays = train[train['weekday'] < 5]
group = weekdays.groupby(['direction', 'index', 'time'])
df = group.median()
cv_weekday_median_model = df.reset_index()
cv_median_model_res = test_cv(cv_median_model, test, y)
cv_weekday_median_model_res = test_cv(cv_weekday_median_model, test, y)
result.append(np.mean(cv_median_model_res) - np.mean(cv_weekday_median_model_res))
print result
# <markdowncell>
# 전체 요일에 대해 비슷하게 cross validation 분석을 해보면 전체 데이터를 사용하는 편이 주말은 물론이고 월요일에도 더 우월한 전략이다. 화요일과 수요일, 목요일 그리고 금요일에는 평일 데이터만 사용하는 편이 더 우월하다. 이는 따로 cross validation을 하지 않은 결과와 비슷해 보인다. 비록 10회 밖에 반복을 하지 않아 통계적인 안정성을 말할 수는 없지만, 적어도 화요일에는 평일 데이터만 사용하는 편이 더 나은 것으로 보인다.
#
# 요일별로 양상이 다른 것을 고려한다면 목표 예측 요일별 데이터를 뽑아내는 모집단도 더 세밀하게 나눠보는 것을 고려할 수 있을 것이다.
# <markdowncell>
# 최종 loss function이 MAE (mean absolute error) 이므로 평균값 (mean) 보다는 중앙값 (median) 을 사용하는 편이 더 성능이 좋을 것이라고 생각할 수 있다. 이를 검증하는 것은 쉬운 문제이다.
# <codecell>
whole_week = pd.concat([april, may])
whole_week['time'] = whole_week.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
group = whole_week.groupby(['direction', 'index', 'time'])
df = group.mean()
mean_model = df.reset_index()
mean_res = test_june(mean_model, 'tue')
print np.mean(mean_res)
print mean_res
# <markdowncell>
# Median을 사용한 모델의 에러는 5.86801621748 였는데, mean을 사용한 모델은 6.04020650371 로 크게 차이난다.
# <codecell>
weekdays = pd.concat([april, may])
weekdays['weekday'] = weekdays['date_time'].apply(lambda x: x.weekday())
weekdays = weekdays[weekdays['weekday'] < 5]
del weekdays['weekday']
weekdays['time'] = weekdays.date_time.apply(lambda x: "{:02d}{:02d}".format(x.hour, x.minute))
group = weekdays.groupby(['direction', 'index', 'time'])
df = group.mean()
weekday_mean_model = df.reset_index()
weekday_mean_res = test_june(weekday_mean_model, 'tue')
print np.mean(weekday_mean_res)
print weekday_mean_res
# <markdowncell>
# 주중 데이터만 사용한 경우에도 마찬가지의 결과를 얻을 수 있다. Median 기반은 5.85363191689 인데 mean 기반은 5.9240332326 이다.
# <codecell>
| mit |
bmtgoncalves/TorinoCourse | Lecture IV/plot_geojson.py | 1 | 1061 | import matplotlib.pyplot as plt
import numpy as np
import json
import sys
data = json.load(open(sys.argv[1]))
def get_bbox(country):
    """Compute the bounding box of one GeoJSON feature.

    Scans the first ring of every polygon in the feature's geometry and
    returns (maxLat, maxLon, minLat, minLon).
    """
    max_lat = max_lon = min_lat = min_lon = None
    for polygon in country["geometry"]["coordinates"]:
        ring = np.array(polygon)[0]  # outer ring: (n_points, 2) lon/lat pairs
        lons = ring.T[0]
        lats = ring.T[1]
        # widen the running extremes with this ring's bounds
        if max_lat is None or lats.max() > max_lat:
            max_lat = lats.max()
        if max_lon is None or lons.max() > max_lon:
            max_lon = lons.max()
        if min_lat is None or lats.min() < min_lat:
            min_lat = lats.min()
        if min_lon is None or lons.min() < min_lon:
            min_lon = lons.min()
    return max_lat, max_lon, min_lat, min_lon
def plot_country(country):
    """Plot the outline of one GeoJSON feature on the current axes."""
    # Draw the first ring (polygon[0]) of each polygon; holes are ignored.
    for polygon in country["geometry"]["coordinates"]:
        coords = np.array(polygon[0])
        plt.plot(coords.T[0], coords.T[1])
# Draw every feature, then frame the plot with a fixed world-ish bounding box.
for country in data["features"]:
    plot_country(country)
# NOTE: the per-country get_bbox call is disabled (it would only reflect the
# last country of the loop above); hard-coded limits are used instead.
maxLat, maxLon, minLat, minLon = 73, 88, -24, -94 #get_bbox(country)
plt.xlim(minLon, maxLon)
plt.ylim(minLat, maxLat)
plt.show()
kaspar030/RIOT | tests/pkg_cmsis-nn/generate_image.py | 15 | 1140 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the CIFAR-10 dataset.
Pixel of the sample are stored as uint8, images have size 32x32x3.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
    """Dump CIFAR-10 test image `args.index` to `args.output` as raw uint8."""
    _, (cifar10_test, _) = cifar10.load_data()  # keep only the test images
    data = cifar10_test[args.index]
    data = data.astype('uint8')
    output_path = os.path.join(SCRIPT_DIR, args.output)
    # raw C-order dump, no header: 32*32*3 = 3072 bytes per image
    np.ndarray.tofile(data, output_path)
    if args.no_plot is False:
        plt.imshow(data)
        plt.show()
if __name__ == '__main__':
    # CLI: --index selects the sample, --output names the dump file,
    # --no-plot suppresses the matplotlib preview.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--index", type=int, default=0,
                        help="Image index in CIFAR test dataset")
    parser.add_argument("-o", "--output", type=str, default='input',
                        help="Output filename")
    parser.add_argument("--no-plot", default=False, action='store_true',
                        help="Disable image display in matplotlib")
    main(parser.parse_args())
| lgpl-2.1 |
saiwing-yeung/scikit-learn | sklearn/utils/extmath.py | 16 | 26642 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
    """
    arr = np.asarray(x)
    # pick the BLAS nrm2 routine matching the array's dtype
    blas_nrm2 = linalg.get_blas_funcs(['nrm2'], [arr])[0]
    return blas_nrm2(arr)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
    _ravel = np.ravel
else:
    # order='K' flattens in memory order, avoiding a copy when layout allows
    _ravel = partial(np.ravel, order='K')
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.
    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). Faster than norm(x) ** 2.
    """
    # flatten (copy-free when possible, see module-level _ravel) then self-dot
    x = _ravel(x)
    return np.dot(x, x)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.
    Performs no input validation.
    """
    if issparse(X):
        # the fast path is CSR-specific: convert other sparse formats first
        if not isinstance(X, csr_matrix):
            X = csr_matrix(X)
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot product without a temporary
        sq_norms = np.einsum('ij,ij->i', X, X)
    if not squared:
        np.sqrt(sq_norms, sq_norms)
    return sq_norms
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: returns -Inf
    when det(A) is non-positive or not defined.
    """
    sign, logdet = np.linalg.slogdet(A)
    # slogdet reports the determinant's sign separately; only a strictly
    # positive determinant has a real log
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (F-ordered array for BLAS, whether it was transposed)."""
    # important to access flags instead of calling np.isfortran,
    # this catches corner cases.
    if X.flags.c_contiguous:
        # the transpose of a C-contiguous array is F-contiguous, so no copy
        # is needed; report the implicit transposition to the caller
        return check_array(X.T, copy=False, order='F'), True
    else:
        return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """Dot product via BLAS gemm; raises ValueError when not applicable.

    Callers (see ``fast_dot``) catch the ValueError and fall back to np.dot.
    """
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError
    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        warnings.warn('Falling back to np.dot. '
                      'Data must be of same type of either '
                      '32 or 64 bit float for the BLAS function, gemm, to be '
                      'used for an efficient dot operation. ',
                      NonBLASDotWarning)
        raise ValueError
    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        # gemm needs genuine 2-d operands; degenerate shapes go to np.dot
        raise ValueError
    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips
        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:
            >> import warnings
            >> from sklearn.exceptions import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data: _fast_dot refused, use plain np.dot.
            return np.dot(A, B)
else:
    # modern NumPy (or no BLAS): np.dot is already fast enough
    fast_dot = np.dot
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Return a value between 0 and 1: the fraction of nonzero entries.
    """
    if hasattr(w, "toarray"):
        # scipy sparse matrix: use its nonzero bookkeeping directly
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    return 0 if w is None else float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handle the sparse matrix case correctly
    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.
    """
    if issparse(a) or issparse(b):
        # for scipy sparse matrices, '*' is matrix multiplication
        ret = a * b
        if dense_output and hasattr(ret, "toarray"):
            ret = ret.toarray()
        return ret
    else:
        return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter,
                            power_iteration_normalizer='auto',
                            random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.
    Parameters
    ----------
    A: 2D array
        The input data matrix
    size: integer
        Size of the return array
    n_iter: integer
        Number of power iterations used to stabilize the result
    power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter`<=2 and switches to LU otherwise.
        .. versionadded:: 0.18
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    Returns
    -------
    Q: 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.
    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    An implementation of a randomized algorithm for principal component
    analysis
    A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    # Generating normal random vectors with shape: (A.shape[1], size)
    Q = random_state.normal(size=(A.shape[1], size))
    # Deal with "auto" mode
    if power_iteration_normalizer == 'auto':
        if n_iter <= 2:
            power_iteration_normalizer = 'none'
        else:
            power_iteration_normalizer = 'LU'
    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q
    for i in range(n_iter):
        if power_iteration_normalizer == 'none':
            Q = safe_sparse_dot(A, Q)
            Q = safe_sparse_dot(A.T, Q)
        elif power_iteration_normalizer == 'LU':
            Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
            Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == 'QR':
            Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
            Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A by one final projection, then extract an
    # orthonormal basis via economic QR
    Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=None,
                   power_iteration_normalizer='auto', transpose='auto',
                   flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD
    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples. Smaller
        number can improve speed but can negatively impact the quality of
        approximation of singular vectors and singular values.
    n_iter: int (default is 4)
        Number of power iterations. It can be used to deal with very noisy
        problems. When `n_components` is small (< .1 * min(X.shape)) `n_iter`
        is set to 7, unless the user specifies a higher number. This improves
        precision with few components.
        .. versionchanged:: 0.18
    power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter`<=2 and switches to LU otherwise.
        .. versionadded:: 0.18
    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case.
        .. versionchanged:: 0.18
    flip_sign: boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior
    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components. In order to
    obtain further speed up, `n_iter` can be set <=2 (at the cost of
    loss of precision).
    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    * An implementation of a randomized algorithm for principal component
      analysis
      A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if n_iter is None:
        # Checks if the number of iterations is explicitly specified
        n_iter = 4
        n_iter_specified = False
    else:
        n_iter_specified = True
    if transpose == 'auto':
        transpose = n_samples < n_features
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    # Adjust n_iter. 7 was found a good compromise for PCA. See #5299
    if n_components < .1 * min(M.shape) and n_iter < 7:
        if n_iter_specified:
            warnings.warn("The number of power iterations is increased to "
                          "7 to achieve higher precision.")
        n_iter = 7
    Q = randomized_range_finder(M, n_random, n_iter,
                                power_iteration_normalizer, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    # lift the left singular vectors back to the original space
    U = np.dot(Q, Uhat)
    if flip_sign:
        if not transpose:
            U, V = svd_flip(U, V)
        else:
            # In case of transpose u_based_decision=false
            # to actually flip based on u and not v.
            U, V = svd_flip(U, V, u_based_decision=False)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107
    """
    rolled = np.rollaxis(arr, axis)
    # Shift by the maximum so the exponentials cannot overflow; add the
    # shift back after taking the log (it accumulates the least error).
    shift = rolled.max(axis=0)
    return np.log(np.sum(np.exp(rolled - shift), axis=0)) + shift
def weighted_mode(a, w, axis=0):
    """Returns an array of the weighted modal (most common) value in a
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    This is an extension of the algorithm in scipy.stats.mode.
    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.
    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.
    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))
    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.
    >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))
    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        axis = axis  # no-op; kept for symmetry with the ravel branch
    if a.shape != w.shape:
        # broadcast the weights up to the shape of the data
        w = np.zeros(a.shape, dtype=w.dtype) + w
    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    # for each candidate value, sum its weights along `axis` and keep the
    # running winner (ties resolved in favor of the earlier candidate)
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.
    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
    cond : float or None, default None
        Cutoff for 'small' eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
    rcond : float or None, default None (deprecated)
        Cutoff for 'small' eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)
    Returns
    -------
    B : array, shape (N, N)
    Raises
    ------
    LinAlgError
        If eigenvalue does not converge
    Examples
    --------
    >>> import numpy as np
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a = np.asarray_chkfinite(a)
    s, u = linalg.eigh(a, lower=lower)
    # the deprecated rcond alias overrides cond when supplied
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # default cutoff scales machine epsilon by dtype-dependent factor
        t = u.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[t] * np.finfo(t).eps
    # unlike svd case, eigh can lead to negative eigenvalues
    above_cutoff = (abs(s) > cond * np.max(abs(s)))
    psigma_diag = np.zeros_like(s)
    psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
    # reconstruct: u * diag(1/s) * u^H, restricted to kept eigenvalues
    return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(arr) for arr in arrays]
    # index grid: one row per product element, one column per input array
    grid = np.indices(tuple(len(arr) for arr in arrays))
    grid = grid.reshape(len(arrays), -1).T
    if out is None:
        # NOTE: the output dtype follows the first input array
        out = np.empty_like(grid, dtype=arrays[0].dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[grid[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
        so one can compute `np.dot(u * s, v)`.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base the
        decision on is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    # np.arange replaces six's xrange here: numpy fancy indexing accepts it
    # on both Python 2 and 3, removing the only use of the compat shim.
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
        u *= signs
        v *= signs[:, np.newaxis]
    else:
        # rows of v, columns of u
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
        u *= signs
        v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
    This implementation is numerically stable because it splits positive and
    negative values::
        -log(1 + exp(-x_i)) if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0
    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
    Parameters
    ----------
    X: array-like, shape (M, N) or (M, )
        Argument to the logistic function
    out: array-like, shape: (M, N) or (M, ), optional:
        Preallocated output array.
    Returns
    -------
    out: array, shape (M, N) or (M, )
        Log of the logistic function evaluated at every point in x
    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    is_1d = X.ndim == 1
    X = np.atleast_2d(X)
    X = check_array(X, dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # delegate the elementwise work to the compiled Cython helper
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    if is_1d:
        # restore the caller's 1-d shape
        return np.squeeze(out)
    return out
def softmax(X, copy=True):
    """
    Evaluate the softmax function row-wise, i.e.

        np.exp(X) / np.sum(np.exp(X), axis=1)

    Exponentiating large values overflows, so the maximum of each row
    is subtracted from that row before taking the exponential.

    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function

    copy: bool, optional
        Copy X or not.

    Returns
    -------
    out: array, shape (M, N)
        Softmax function evaluated at every point in x
    """
    if copy:
        X = np.copy(X)
    # shift each row so its largest entry becomes 0 (overflow guard)
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)
    # normalize rows in place so each sums to one
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
    """Return the minimum value of a dense or a CSR/CSC matrix.

    For sparse input the implicit zeros are taken into account: when the
    matrix has any unstored entries, the result is capped at 0.

    Adapted from http://stackoverflow.com/q/13426580
    """
    if not issparse(X):
        return X.min()
    if len(X.data) == 0:
        # no stored entries at all -> everything is an implicit zero
        return 0
    stored_min = X.data.min()
    if X.getnnz() == X.size:
        return stored_min
    return min(stored_min, 0)
def make_nonnegative(X, min_value=0):
    """Shift `X` so that ``X.min() >= min_value``.

    Dense input is shifted by a constant when necessary; sparse input
    cannot be shifted (that would densify it), so a ValueError is raised
    instead.
    """
    lowest = safe_min(X)
    if lowest >= min_value:
        # already non-negative enough; return unchanged
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    return X + (min_value - lowest)
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
necessary for the calculation of the variance. last_n_samples_seen is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
M4573R/BuildingMachineLearningSystemsWithPython | ch05/utils.py | 24 | 7111 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
try:
import ujson as json # UltraJSON if available
except:
import json
from matplotlib import pylab
import numpy as np
from data import CHART_DIR
def fetch_data(filename, col=None, line_count=-1, only_questions=False):
    """Yield parsed rows of the tab-separated posts dump.

    Parameters
    ----------
    filename : str
        Path to a tab-separated file whose columns are
        Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score,
        Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction.
    col : int, optional
        If given, yield only that single column for each row (converted
        to int for the first six columns, raw string otherwise) instead
        of the fully parsed tuple.
    line_count : int
        Maximum number of lines to read; -1 means no limit.
    only_questions : bool
        If True, skip rows that are not questions.
    """
    count = 0
    # use a context manager so the file handle is always closed
    with open(filename, "r") as f:
        for line in f:
            count += 1
            if line_count > 0 and count > line_count:
                break

            data = Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction = line.split(
                "\t")
            IsQuestion = int(IsQuestion)

            if only_questions and not IsQuestion:
                continue

            # BUG FIX: was `if col:`, which silently fell through to the
            # full-tuple branch when col == 0 (the Id column) was requested.
            if col is not None:
                if col < 6:
                    val = int(data[col])
                else:
                    val = data[col]
                yield val
            else:
                Id = int(Id)
                assert Id >= 0, line
                ParentId = int(ParentId)
                IsAccepted = int(IsAccepted)

                # a question can never be its own accepted answer
                assert not IsQuestion == IsAccepted == 1, "%i %i --- %s" % (
                    IsQuestion, IsAccepted, line)
                # questions have ParentId == -1, answers a real parent
                assert (ParentId == -1 and IsQuestion) or (
                    ParentId >= 0 and not IsQuestion), "%i %i --- %s" % (ParentId, IsQuestion, line)

                TimeToAnswer = int(TimeToAnswer)
                Score = int(Score)
                NumTextTokens = int(NumTextTokens)
                NumCodeLines = int(NumCodeLines)
                LinkCount = int(LinkCount)
                MisSpelledFraction = float(MisSpelledFraction)
                yield Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction
def fetch_posts(filename, with_index=True, line_count=-1):
    """Yield (Id, Text) pairs (or just Text) from a tab-separated file.

    Parameters
    ----------
    filename : str
        Path to a file with one "Id<TAB>Text" record per line.
    with_index : bool
        If True yield ``(int(Id), Text)`` tuples, otherwise just Text.
    line_count : int
        Maximum number of lines to read; -1 means no limit.
    """
    count = 0
    # FIX: use a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector)
    with open(filename, "r") as f:
        for line in f:
            count += 1
            if line_count > 0 and count > line_count:
                break

            Id, Text = line.split("\t")
            Text = Text.strip()

            if with_index:
                yield int(Id), Text
            else:
                yield Text
def load_meta(filename):
    """Load post metadata from a JSON file.

    Returns
    -------
    meta : dict
        Mapping of int post Id to its info dict.
    id_to_idx : dict
        Post Id -> index in the vectorized matrix.
    idx_to_id : dict
        Inverse mapping of ``id_to_idx``.
    """
    # FIX: close the file deterministically (the original passed an
    # anonymous open() handle to json.load and leaked it)
    with open(filename, "r") as f:
        meta = json.load(f)

    # JSON only allows string keys, changing that to int
    for key in list(meta.keys()):
        meta[int(key)] = meta[key]
        del meta[key]

    # post Id to index in vectorized
    id_to_idx = {}
    # and back
    idx_to_id = {}
    for PostId, Info in meta.items():
        id_to_idx[PostId] = idx = Info['idx']
        idx_to_id[idx] = PostId

    return meta, id_to_idx, idx_to_id
def plot_roc(auc_score, name, fpr, tpr):
    """Plot a ROC curve and save it as roc_<name>.png in CHART_DIR.

    Parameters
    ----------
    auc_score : float
        Area under the curve, shown in the title.
    name : str
        Chart name; used in the title and the output file name.
    fpr, tpr : array-like
        False and true positive rates (x and y of the curve).
    """
    pylab.figure(num=None, figsize=(6, 5))
    # diagonal reference line for a random classifier
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
        auc_score, name))
    pylab.legend(loc="lower right")
    pylab.grid(True, linestyle='-', color='0.75')
    # BUG FIX: arguments were swapped (fill_between(tpr, fpr)), which
    # shaded the area under the mirrored curve instead of the ROC curve
    # drawn by the plot() call below.
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.plot(fpr, tpr, lw=1)
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
def plot_pr(auc_score, name, precision, recall, label=None):
    """Draw a precision/recall curve and save it as pr_<name>.png in
    CHART_DIR.

    ``auc_score`` and ``label`` appear in the title; ``name`` (with
    spaces replaced by underscores) determines the output file name.
    """
    pylab.figure(num=None, figsize=(6, 5))
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.plot(recall, precision, lw=1)
    out_name = "pr_%s.png" % name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, out_name))
def show_most_informative_features(vectorizer, clf, n=20):
    """Print the n strongest negative and positive features side by side.

    Each printed row pairs the i-th most negative coefficient/feature
    with the i-th most positive one.
    """
    ranked = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
    most_negative = ranked[:n]
    most_positive = ranked[:-(n + 1):-1]
    for (neg_coef, neg_feat), (pos_coef, pos_feat) in zip(most_negative,
                                                          most_positive):
        print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (neg_coef, neg_feat,
                                                pos_coef, pos_feat))
def plot_feat_importance(feature_names, clf, name):
    """Bar-plot the classifier coefficients sorted by value and save the
    figure as feat_imp_<name>.png in CHART_DIR."""
    pylab.figure(num=None, figsize=(6, 5))
    coefs = clf.coef_.ravel()
    # order by |coef| first, then re-order by signed value
    by_magnitude = np.argsort(np.absolute(coefs))
    names_sorted = feature_names[by_magnitude]
    coefs = coefs[by_magnitude]
    by_value = np.argsort(coefs)
    names_sorted = names_sorted[by_value]
    coefs = coefs[by_value]
    xpos = np.arange(len(coefs))
    pylab.bar(xpos, coefs, width=1)
    pylab.title('Feature importance for %s' % (name))
    ax = pylab.gca()
    ax.set_xticks(np.arange(len(coefs)))
    # rotate the feature names so they stay readable
    for label in ax.set_xticklabels(names_sorted):
        label.set_rotation(90)
    pylab.savefig(os.path.join(
        CHART_DIR, "feat_imp_%s.png" % name.replace(" ", "_")),
        bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
    """Plot histograms for one or more (data, name) pairs in a grid.

    Parameters
    ----------
    data_name_list : list of (array-like, str)
        Each entry is the values to histogram plus a title for its
        subplot. Laid out two subplots per row.
    filename : str, optional
        Output file name; required when plotting more than one feature.
        When omitted (single feature), derived from the feature name.
    """
    if len(data_name_list) > 1:
        assert filename is not None
    # NOTE(review): this first figure is immediately superseded by the
    # second pylab.figure() call below and ends up empty/unused.
    pylab.figure(num=None, figsize=(8, 6))
    # grid: two columns unless there is only a single feature
    num_rows = int(1 + (len(data_name_list) - 1) / 2)
    num_cols = int(1 if len(data_name_list) == 1 else 2)
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Fraction')
            # the histogram of the data
            # fixed 50 bins for fractional or wide-range data,
            # otherwise one bin per integer value up to max_val
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, normed=1, alpha=0.75)
            pylab.grid(True)
    if not filename:
        # `name` here is the title of the last subplot plotted
        filename = "feat_hist_%s.png" % name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name, title):
    """Plot train/test error against training-set size and save the
    chart as bv_<name>.png in CHART_DIR.

    NOTE(review): the ``title`` parameter is currently unused; the chart
    title is derived from ``name``.
    """
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, test_errors, "--", data_sizes, train_errors, "b-", lw=1)
    pylab.legend(["test error", "train error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    out_file = "bv_" + name.replace(" ", "_") + ".png"
    pylab.savefig(os.path.join(CHART_DIR, out_file), bbox_inches="tight")
def plot_k_complexity(ks, train_errors, test_errors):
    """Plot train/test error as a function of k and save the chart as
    kcomplexity.png in CHART_DIR.

    Parameters
    ----------
    ks : array-like
        The values of k (model complexity) on the x axis.
    train_errors, test_errors : array-like
        Error rates corresponding to each k.
    """
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('k')
    pylab.ylabel('Error')
    # FIX: title read "Errors for for different values" (duplicated word)
    pylab.title('Errors for different values of $k$')
    pylab.plot(
        ks, test_errors, "--", ks, train_errors, "-", lw=1)
    pylab.legend(["test error", "train error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(
        os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
| mit |
DSLituiev/scikit-learn | examples/model_selection/plot_precision_recall.py | 74 | 6377 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
# Binarize the output: one indicator column per class so each class can
# get its own precision-recall curve
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=random_state)
# Run classifier: one linear SVM per class, scored by decision function
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve (one curve per class)
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
                                                        y_score[:, i])
    average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area: every element of the
# label indicator matrix is treated as an individual binary prediction
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
    y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
                                                     average="micro")
# Plot Precision-Recall curve for the first class only
plt.clf()
plt.plot(recall[0], precision[0], lw=lw, color='navy',
         label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class plus the micro-average
plt.clf()
plt.plot(recall["micro"], precision["micro"], color='gold', lw=lw,
         label='micro-average Precision-recall curve (area = {0:0.2f})'
               ''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
    plt.plot(recall[i], precision[i], color=color, lw=lw,
             label='Precision-recall curve of class {0} (area = {1:0.2f})'
                   ''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/mlab.py | 2 | 122923 | """
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
---------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
Interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
Find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (spectrum over segments of time)
Miscellaneous functions
-----------------------
Functions that don't exist in MATLAB, but are useful anyway:
:func:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:func:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:func:`contiguous_regions`
Return the indices of the regions spanned by some logical mask
:func:`cross_from_below`
Return the indices where a 1D array crosses a threshold from below
:func:`cross_from_above`
Return the indices where a 1D array crosses a threshold from above
:func:`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
:func:`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
:func:`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
:func:`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
:func:`detrend_mean`
Remove the mean from a line.
:func:`demean`
Remove the mean from a line. This function is the same as
:func:`detrend_mean` except for the default *axis*.
:func:`detrend_linear`
Remove the best fit line from a line.
:func:`detrend_none`
Return the original line.
:func:`stride_windows`
Get all windows in an array in a memory-efficient manner
:func:`stride_repeat`
Repeat an array in a memory-efficient manner
:func:`apply_window`
Apply a window along a given axis
record array helper functions
-----------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:func:`rec2txt`
Pretty print a record array
:func:`rec2csv`
Store record array in CSV file
:func:`csv2rec`
Import record array from CSV file with type inspection
:func:`rec_append_fields`
Adds field(s)/array(s) to record array
:func:`rec_drop_fields`
Drop fields from record array
:func:`rec_join`
Join two record arrays on sequence of fields
:func:`recs_join`
A simple join of multiple recarrays using a single column as a key
:func:`rec_groupby`
Summarize data by groups (similar to SQL GROUP BY)
:func:`rec_summarize`
Helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, xrange, zip
import copy
import csv
import operator
import os
import warnings
import numpy as np
import matplotlib.cbook as cbook
from matplotlib import docstring
from matplotlib.path import Path
import math
if six.PY3:
long = int
def logspace(xmin, xmax, N):
    '''
    Return N values logarithmically spaced between xmin and xmax.

    Unlike :func:`numpy.logspace`, the endpoints are given directly
    rather than as exponents.
    '''
    log_lo, log_hi = np.log(xmin), np.log(xmax)
    return np.exp(np.linspace(log_lo, log_hi, N))
def _norm(x):
'''
Return sqrt(x dot x).
'''
return np.sqrt(np.dot(x, x))
def window_hanning(x):
    '''
    Return *x* multiplied element-wise by a Hanning window of len(x).

    See Also
    --------
    :func:`window_none`
        :func:`window_none` is another window algorithm.
    '''
    taper = np.hanning(len(x))
    return taper * x
def window_none(x):
    '''
    The identity window: return *x* unchanged.

    See Also
    --------
    :func:`window_hanning`
        :func:`window_hanning` is another window algorithm.
    '''
    return x
def apply_window(x, window, axis=0, return_window=None):
    '''
    Apply *window* to the 1D or 2D array *x* along *axis*.

    Parameters
    ----------
    x : 1D or 2D array or sequence
        Array or sequence containing the data.

    window : function or array.
        Either a function that generates the window values from a
        length, or an array with length *x*.shape[*axis*].

    axis : integer
        The axis over which to apply the window.
        Must be 0 or 1.  The default is 0

    return_window : bool
        If true, also return the 1D values of the window that was applied
    '''
    x = np.asarray(x)
    if not 1 <= x.ndim <= 2:
        raise ValueError('only 1D or 2D arrays can be used')
    if axis + 1 > x.ndim:
        raise ValueError('axis(=%s) out of bounds' % axis)

    shape = list(x.shape)
    target_len = shape.pop(axis)

    if cbook.iterable(window):
        # a concrete array of window values; must match the axis length
        if len(window) != target_len:
            raise ValueError('The len(window) must be the same as the shape '
                             'of x for the chosen axis')
        window_vals = window
    else:
        # a window-generating function, e.g., np.hanning
        window_vals = window(np.ones(target_len, dtype=x.dtype))

    if x.ndim == 1:
        product = window_vals * x
        return (product, window_vals) if return_window else product

    # 2D case: tile the window across the other axis without copying
    other_len = shape.pop()
    other_axis = (axis + 1) % 2
    tiled = stride_repeat(window_vals, other_len, axis=other_axis)
    product = tiled * x
    return (product, window_vals) if return_window else product
def detrend(x, key=None, axis=None):
    '''
    Return x with its trend removed.

    Dispatches to one of the concrete detrend functions based on *key*.

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data.

    key : [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or function
        Specifies the detrend algorithm to use. 'default' is 'mean', which is
        the same as :func:`detrend_mean`. 'constant' is the same. 'linear' is
        the same as :func:`detrend_linear`. 'none' is the same as
        :func:`detrend_none`. The default is 'mean'. See the corresponding
        functions for more details regarding the algorithms. Can also be a
        function that carries out the detrend operation.

    axis : integer
        The axis along which to do the detrending.

    See Also
    --------
    :func:`detrend_mean`
        :func:`detrend_mean` implements the 'mean' algorithm.

    :func:`detrend_linear`
        :func:`detrend_linear` implements the 'linear' algorithm.

    :func:`detrend_none`
        :func:`detrend_none` implements the 'none' algorithm.
    '''
    # map the string aliases onto the concrete functions by recursing
    # once with key bound to a callable
    if key is None or key in ['constant', 'mean', 'default']:
        return detrend(x, key=detrend_mean, axis=axis)
    elif key == 'linear':
        return detrend(x, key=detrend_linear, axis=axis)
    elif key == 'none':
        return detrend(x, key=detrend_none, axis=axis)
    elif isinstance(key, six.string_types):
        # a string, but not one of the recognized aliases
        raise ValueError("Unknown value for key %s, must be one of: "
                         "'default', 'constant', 'mean', "
                         "'linear', or a function" % key)

    # at this point key must be a user-supplied callable
    if not callable(key):
        raise ValueError("Unknown value for key %s, must be one of: "
                         "'default', 'constant', 'mean', "
                         "'linear', or a function" % key)

    x = np.asarray(x)

    if axis is not None and axis+1 > x.ndim:
        raise ValueError('axis(=%s) out of bounds' % axis)

    # scalars and 1D data with the default axis need no axis handling
    if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
        return key(x)

    # try to use the 'axis' argument if the function supports it,
    # otherwise use apply_along_axis to do it
    try:
        return key(x, axis=axis)
    except TypeError:
        return np.apply_along_axis(key, axis=axis, arr=x)
def demean(x, axis=0):
    '''
    Return x minus its mean along the specified axis.

    Thin wrapper around :func:`detrend_mean`; only the default *axis*
    (0 here, None there) differs.

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data
        Can have any dimensionality

    axis : integer
        The axis along which to take the mean.  See numpy.mean for a
        description of this argument.

    See Also
    --------
    :func:`delinear`

    :func:`denone`
        :func:`delinear` and :func:`denone` are other detrend algorithms.

    :func:`detrend_mean`
        This function is the same as :func:`detrend_mean` except for the
        default *axis*.
    '''
    return detrend_mean(x, axis=axis)
def detrend_mean(x, axis=None):
    '''
    Return x minus the mean(x).

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data
        Can have any dimensionality

    axis : integer
        The axis along which to take the mean.  See numpy.mean for a
        description of this argument.

    See Also
    --------
    :func:`demean`
        This function is the same as :func:`demean` except for the default
        *axis*.

    :func:`detrend_linear`

    :func:`detrend_none`
        :func:`detrend_linear` and :func:`detrend_none` are other detrend
        algorithms.

    :func:`detrend`
        :func:`detrend` is a wrapper around all the detrend algorithms.
    '''
    x = np.asarray(x)

    if axis is not None and axis+1 > x.ndim:
        raise ValueError('axis(=%s) out of bounds' % axis)

    # short-circuit 0-D array.
    if not x.ndim:
        return np.array(0., dtype=x.dtype)

    # short-circuit simple operations
    if axis == 0 or axis is None or x.ndim <= 1:
        return x - x.mean(axis)

    # general case: reinsert the reduced axis so the mean broadcasts
    ind = [slice(None)] * x.ndim
    ind[axis] = np.newaxis
    # FIX: index with a tuple -- indexing an ndarray with a *list* of
    # slices is deprecated since NumPy 1.15 and an error in modern NumPy.
    return x - x.mean(axis)[tuple(ind)]
def detrend_none(x, axis=None):
    '''
    Return x unchanged: the 'none' detrend algorithm.

    Parameters
    ----------
    x : any object
        An object containing the data

    axis : integer
        Ignored; accepted only for call-compatibility with
        :func:`detrend_mean`.

    See Also
    --------
    :func:`denone`
        This function is the same as :func:`denone` except for the default
        *axis*, which has no effect.

    :func:`detrend_mean`

    :func:`detrend_linear`
        :func:`detrend_mean` and :func:`detrend_linear` are other detrend
        algorithms.

    :func:`detrend`
        :func:`detrend` is a wrapper around all the detrend algorithms.
    '''
    return x
def detrend_linear(y):
    '''
    Return y minus its least-squares line fit: 'linear' detrending.

    Parameters
    ----------
    y : 0-D or 1-D array or sequence
        Array or sequence containing the data

    See Also
    --------
    :func:`delinear`
        This function is the same as :func:`delinear` except for the default
        *axis*.

    :func:`detrend_mean`

    :func:`detrend_none`
        :func:`detrend_mean` and :func:`detrend_none` are other detrend
        algorithms.

    :func:`detrend`
        :func:`detrend` is a wrapper around all the detrend algorithms.
    '''
    # This is faster than an algorithm based on linalg.lstsq.
    y = np.asarray(y)

    if y.ndim > 1:
        raise ValueError('y cannot have ndim > 1')

    # short-circuit 0-D array.
    if not y.ndim:
        return np.array(0., dtype=y.dtype)

    t = np.arange(y.size, dtype=float)

    # slope and intercept from the covariance of (t, y)
    cov = np.cov(t, y, bias=1)
    slope = cov[0, 1] / cov[0, 0]
    intercept = y.mean() - slope * t.mean()
    return y - (slope * t + intercept)
def stride_windows(x, n, noverlap=None, axis=0):
    '''
    Return all length-*n* windows of *x* as a single array, using
    strides so the data is not duplicated.

    .. warning::

        It is not safe to write to the output array.  Multiple
        elements may point to the same piece of memory,
        so modifying one value may change others.

    Parameters
    ----------
    x : 1D array or sequence
        Array or sequence containing the data.

    n : integer
        The number of data points in each window.

    noverlap : integer
        The overlap between adjacent windows.
        Default is 0 (no overlap)

    axis : integer
        The axis along which the windows will run.

    References
    ----------
    `stackoverflow: Rolling window for 1D arrays in Numpy?
    <http://stackoverflow.com/a/6811241>`_

    `stackoverflow: Using strides for an efficient moving average filter
    <http://stackoverflow.com/a/4947453>`_
    '''
    if noverlap is None:
        noverlap = 0

    if noverlap >= n:
        raise ValueError('noverlap must be less than n')
    if n < 1:
        raise ValueError('n cannot be less than 1')

    x = np.asarray(x)

    if x.ndim != 1:
        raise ValueError('only 1-dimensional arrays can be used')
    if n == 1 and noverlap == 0:
        # trivial case: each window is a single sample
        return x[np.newaxis] if axis == 0 else x[np.newaxis].transpose()
    if n > x.size:
        raise ValueError('n cannot be greater than the length of x')

    # np.lib.stride_tricks.as_strided easily leads to memory corruption for
    # non integer shape and strides, i.e. noverlap or n. See #3845.
    noverlap = int(noverlap)
    n = int(n)

    hop = n - noverlap
    n_windows = (x.shape[-1] - noverlap) // hop
    elem_stride = x.strides[0]
    if axis == 0:
        shape = (n, n_windows)
        strides = (elem_stride, hop * elem_stride)
    else:
        shape = (n_windows, n)
        strides = (hop * elem_stride, elem_stride)
    return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def stride_repeat(x, n, axis=0):
    '''
    Repeat a 1D array *n* times without copying its data, by giving the
    repeated axis a zero stride.

    .. warning::

        It is not safe to write to the output array.  Multiple
        elements may point to the same piece of memory, so
        modifying one value may change others.

    Parameters
    ----------
    x : 1D array or sequence
        Array or sequence containing the data.

    n : integer
        The number of time to repeat the array.

    axis : integer
        The axis along which the data will run.

    References
    ----------
    `stackoverflow: Repeat NumPy array without replicating data?
    <http://stackoverflow.com/a/5568169>`_
    '''
    if axis not in [0, 1]:
        raise ValueError('axis must be 0 or 1')
    x = np.asarray(x)
    if x.ndim != 1:
        raise ValueError('only 1-dimensional arrays can be used')
    if n == 1:
        # a single repetition is just a 2D view of x
        return np.atleast_2d(x) if axis == 0 else np.atleast_2d(x).T
    if n < 1:
        raise ValueError('n cannot be less than 1')

    # np.lib.stride_tricks.as_strided easily leads to memory corruption for
    # non integer shape and strides, i.e. n. See #3845.
    n = int(n)

    elem_stride = x.strides[0]
    if axis == 0:
        shape, strides = (n, x.size), (0, elem_stride)
    else:
        shape, strides = (x.size, n), (elem_stride, 0)
    return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
                     window=None, noverlap=None, pad_to=None,
                     sides=None, scale_by_freq=None, mode=None):
    '''
    This is a helper function that implements the commonality between the
    psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
    It is *NOT* meant to be used outside of mlab and may change at any time.

    Returns the (unaveraged, one column per segment) spectrum ``result``,
    the frequency vector ``freqs`` and the segment midpoint times ``t``.
    '''
    if y is None:
        # if y is None use x for y
        same_data = True
    else:
        # The checks for if y is x are so that we can use the same function to
        # implement the core of psd(), csd(), and spectrogram() without doing
        # extra calculations.  We return the unaveraged Pxy, freqs, and t.
        same_data = y is x
    # Defaults mirror the documented defaults of psd()/csd()/specgram().
    if Fs is None:
        Fs = 2
    if noverlap is None:
        noverlap = 0
    if detrend_func is None:
        detrend_func = detrend_none
    if window is None:
        window = window_hanning
    # if NFFT is set to None use the whole signal
    if NFFT is None:
        NFFT = 256
    if mode is None or mode == 'default':
        mode = 'psd'
    elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'default', 'psd', 'complex', "
                         "'magnitude', 'angle', 'phase'" % mode)
    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")
    # Make sure we're dealing with a numpy array. If y and x were the same
    # object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    if sides is None or sides == 'default':
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
    elif sides not in ['onesided', 'twosided']:
        raise ValueError("Unknown value for sides %s, must be one of: "
                         "'default', 'onesided', or 'twosided'" % sides)
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, (NFFT,))
        x[n:] = 0
    if not same_data and len(y) < NFFT:
        n = len(y)
        y = np.resize(y, (NFFT,))
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    # density scaling only makes sense for power spectral densities
    if mode != 'psd':
        scale_by_freq = False
    elif scale_by_freq is None:
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if sides == 'twosided':
        numFreqs = pad_to
        if pad_to % 2:
            freqcenter = (pad_to - 1)//2 + 1
        else:
            freqcenter = pad_to//2
        scaling_factor = 1.
    elif sides == 'onesided':
        if pad_to % 2:
            numFreqs = (pad_to + 1)//2
        else:
            numFreqs = pad_to//2 + 1
        # one-sided spectra double everything except DC (and Nyquist, below)
        scaling_factor = 2.
    # Segment -> detrend -> window -> FFT for x.
    result = stride_windows(x, NFFT, noverlap, axis=0)
    result = detrend(result, detrend_func, axis=0)
    result, windowVals = apply_window(result, window, axis=0,
                                      return_window=True)
    result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
    freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
    if not same_data:
        # if same_data is False, mode must be 'psd'
        resultY = stride_windows(y, NFFT, noverlap)
        # BUG FIX: detrend *before* windowing so y gets the same treatment
        # as x above.  The original applied the window first, which biases
        # the detrend fit by the window shape and makes Pxy inconsistent
        # with Pxx/Pyy.
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = apply_window(resultY, window, axis=0)
        result = np.conj(result) * resultY
    elif mode == 'psd':
        result = np.conj(result) * result
    elif mode == 'magnitude':
        result = np.abs(result) / np.abs(windowVals).sum()
    elif mode == 'angle' or mode == 'phase':
        # we unwrap the phase later to handle the onesided vs. twosided case
        result = np.angle(result)
    elif mode == 'complex':
        result /= np.abs(windowVals).sum()
    if mode == 'psd':
        # Also include scaling factors for one-sided densities and dividing by
        # the sampling frequency, if desired. Scale everything, except the DC
        # component and the NFFT/2 component:
        # if we have a even number of frequencies, don't scale NFFT/2
        if not NFFT % 2:
            slc = slice(1, -1, None)
        # if we have an odd number, just don't scale DC
        else:
            slc = slice(1, None, None)
        result[slc] *= scaling_factor
        # MATLAB divides by the sampling frequency so that density function
        # has units of dB/Hz and can be integrated by the plotted frequency
        # values. Perform the same scaling here.
        if scale_by_freq:
            result /= Fs
            # Scale the spectrum by the norm of the window to compensate for
            # windowing loss; see Bendat & Piersol Sec 11.5.2.
            result /= (np.abs(windowVals)**2).sum()
        else:
            # In this case, preserve power in the segment, not amplitude
            result /= np.abs(windowVals).sum()**2
    # segment midpoint times
    t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
    if sides == 'twosided':
        # center the frequency range at zero
        freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
        result = np.concatenate((result[freqcenter:, :],
                                 result[:freqcenter, :]), 0)
    elif not pad_to % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1
    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=0)
    return result, freqs, t
def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
                            sides=None):
    '''
    Helper implementing the shared logic of the complex, magnitude, angle,
    and phase spectrum functions.
    It is *NOT* meant to be used outside of mlab and may change at any time.
    '''
    if mode in (None, 'psd', 'default'):
        raise ValueError('_single_spectrum_helper does not work with %s mode'
                         % mode)
    if pad_to is None:
        pad_to = len(x)
    # One segment covering the whole signal: no overlap, no detrending,
    # no density scaling.
    spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
                                      detrend_func=detrend_none, window=window,
                                      noverlap=0, pad_to=pad_to,
                                      sides=sides,
                                      scale_by_freq=False,
                                      mode=mode)
    if mode != 'complex':
        spec = spec.real
    # collapse the single segment axis so callers get a 1-D spectrum
    if spec.ndim == 2 and spec.shape[1] == 1:
        spec = spec[:, 0]
    return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(Spectral=cbook.dedent("""
Fs : scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
window : callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
sides : [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the spectrum to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided
spectrum, while 'twosided' forces two-sided.
"""))
docstring.interpd.update(Single_Spectrum=cbook.dedent("""
pad_to : integer
The number of points to which the data segment is padded when
performing the FFT. While not increasing the actual resolution of
the spectrum (the minimum distance between resolvable peaks),
this can give more points in the plot, allowing for more
detail. This corresponds to the *n* parameter in the call to fft().
The default is None, which sets *pad_to* equal to the length of the
input signal (i.e. no padding).
"""))
docstring.interpd.update(PSD=cbook.dedent("""
pad_to : integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the spectrum (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
NFFT : integer
The number of data points used in each block for the FFT.
A power 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
detrend : {'default', 'constant', 'mean', 'linear', 'none'} or callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
MATLAB, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well. You can also use a string to choose
one of the functions. 'default', 'constant', and 'mean' call
:func:`~matplotlib.pylab.detrend_mean`. 'linear' calls
:func:`~matplotlib.pylab.detrend_linear`. 'none' calls
:func:`~matplotlib.pylab.detrend_none`.
scale_by_freq : boolean, optional
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
        noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
    r"""
    Compute the power spectral density.

    Call signature::

        psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
            window=mlab.window_hanning, noverlap=0, pad_to=None,
            sides='default', scale_by_freq=None)

    The power spectral density :math:`P_{xx}` by Welch's average
    periodogram method.  The vector *x* is divided into *NFFT* length
    segments.  Each segment is detrended by function *detrend* and
    windowed by function *window*.  *noverlap* gives the length of
    the overlap between segments.  The :math:`|\mathrm{fft}(i)|^2`
    of each segment :math:`i` are averaged to compute :math:`P_{xx}`.

    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : integer
        The number of points of overlap between segments.
        The default value is 0 (no overlap).

    Returns
    -------
    Pxx : 1-D array
        The values for the power spectrum `P_{xx}` (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *Pxx*

    References
    ----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
    Wiley & Sons (1986)

    See Also
    --------
    :func:`specgram`
        :func:`specgram` differs in the default overlap; in not returning the
        mean of the segment periodograms; and in returning the times of the
        segments.

    :func:`magnitude_spectrum`
        :func:`magnitude_spectrum` returns the magnitude spectrum.

    :func:`csd`
        :func:`csd` returns the spectral density between two signals.
    """
    # psd is just the auto-spectral case of csd.
    spec, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
                      window=window, noverlap=noverlap, pad_to=pad_to,
                      sides=sides, scale_by_freq=scale_by_freq)
    # The auto-spectrum is real up to rounding error; discard the
    # residual imaginary part.
    return spec.real, freqs
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
        noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
    """
    Compute the cross-spectral density.

    Call signature::

        csd(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
            window=mlab.window_hanning, noverlap=0, pad_to=None,
            sides='default', scale_by_freq=None)

    The cross spectral density :math:`P_{xy}` by Welch's average
    periodogram method.  The vectors *x* and *y* are divided into
    *NFFT* length segments.  Each segment is detrended by function
    *detrend* and windowed by function *window*.  *noverlap* gives
    the length of the overlap between segments.  The product of
    the direct FFTs of *x* and *y* are averaged over each segment
    to compute :math:`P_{xy}`, with a scaling to correct for power
    loss due to windowing.

    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.

    Parameters
    ----------
    x, y : 1-D arrays or sequences
        Arrays or sequences containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : integer
        The number of points of overlap between segments.
        The default value is 0 (no overlap).

    Returns
    -------
    Pxy : 1-D array
        The values for the cross spectrum `P_{xy}` before scaling (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *Pxy*

    References
    ----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
    Wiley & Sons (1986)

    See Also
    --------
    :func:`psd`
        :func:`psd` is the equivalent to setting y=x.
    """
    if NFFT is None:
        NFFT = 256
    Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
                                     detrend_func=detrend, window=window,
                                     noverlap=noverlap, pad_to=pad_to,
                                     sides=sides, scale_by_freq=scale_by_freq,
                                     mode='psd')
    if Pxy.ndim == 2:
        # Average the periodograms over segments, or just drop the
        # singleton segment axis when there is only one segment.
        Pxy = Pxy.mean(axis=1) if Pxy.shape[1] > 1 else Pxy[:, 0]
    return Pxy, freqs
@docstring.dedent_interpd
def complex_spectrum(x, Fs=None, window=None, pad_to=None,
                     sides=None):
    """
    Compute the complex-valued frequency spectrum of *x*.  Data is padded to a
    length of *pad_to* and the windowing function *window* is applied to the
    signal.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    Returns
    -------
    spectrum : 1-D array
        The values for the complex spectrum (complex valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *spectrum*

    See Also
    --------
    :func:`magnitude_spectrum`
        :func:`magnitude_spectrum` returns the absolute value of this function.

    :func:`angle_spectrum`
        :func:`angle_spectrum` returns the angle of this function.

    :func:`phase_spectrum`
        :func:`phase_spectrum` returns the phase (unwrapped angle) of this
        function.

    :func:`specgram`
        :func:`specgram` can return the complex spectrum of segments within the
        signal.
    """
    # Thin wrapper: all real work happens in the shared helper.
    spec, freqs = _single_spectrum_helper(x=x, Fs=Fs, window=window,
                                          pad_to=pad_to, sides=sides,
                                          mode='complex')
    return spec, freqs
@docstring.dedent_interpd
def magnitude_spectrum(x, Fs=None, window=None, pad_to=None,
                       sides=None):
    """
    Compute the magnitude (absolute value) of the frequency spectrum of
    *x*.  Data is padded to a length of *pad_to* and the windowing function
    *window* is applied to the signal.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    Returns
    -------
    spectrum : 1-D array
        The values for the magnitude spectrum (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *spectrum*

    See Also
    --------
    :func:`psd`
        :func:`psd` returns the power spectral density.

    :func:`complex_spectrum`
        This function returns the absolute value of :func:`complex_spectrum`.

    :func:`angle_spectrum`
        :func:`angle_spectrum` returns the angles of the corresponding
        frequencies.

    :func:`phase_spectrum`
        :func:`phase_spectrum` returns the phase (unwrapped angle) of the
        corresponding frequencies.

    :func:`specgram`
        :func:`specgram` can return the magnitude spectrum of segments within
        the signal.
    """
    # Thin wrapper: all real work happens in the shared helper.
    spec, freqs = _single_spectrum_helper(x=x, Fs=Fs, window=window,
                                          pad_to=pad_to, sides=sides,
                                          mode='magnitude')
    return spec, freqs
@docstring.dedent_interpd
def angle_spectrum(x, Fs=None, window=None, pad_to=None,
                   sides=None):
    """
    Compute the angle of the frequency spectrum (wrapped phase spectrum) of
    *x*.  Data is padded to a length of *pad_to* and the windowing function
    *window* is applied to the signal.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    Returns
    -------
    spectrum : 1-D array
        The values for the angle spectrum in radians (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *spectrum*

    See Also
    --------
    :func:`complex_spectrum`
        This function returns the angle value of :func:`complex_spectrum`.

    :func:`magnitude_spectrum`
        :func:`angle_spectrum` returns the magnitudes of the corresponding
        frequencies.

    :func:`phase_spectrum`
        :func:`phase_spectrum` returns the unwrapped version of this function.

    :func:`specgram`
        :func:`specgram` can return the angle spectrum of segments within the
        signal.
    """
    # Thin wrapper: all real work happens in the shared helper.
    spec, freqs = _single_spectrum_helper(x=x, Fs=Fs, window=window,
                                          pad_to=pad_to, sides=sides,
                                          mode='angle')
    return spec, freqs
@docstring.dedent_interpd
def phase_spectrum(x, Fs=None, window=None, pad_to=None,
                   sides=None):
    """
    Compute the phase of the frequency spectrum (unwrapped angle spectrum) of
    *x*.  Data is padded to a length of *pad_to* and the windowing function
    *window* is applied to the signal.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    Returns
    -------
    spectrum : 1-D array
        The values for the phase spectrum in radians (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *spectrum*

    See Also
    --------
    :func:`complex_spectrum`
        This function returns the angle value of :func:`complex_spectrum`.

    :func:`magnitude_spectrum`
        :func:`magnitude_spectrum` returns the magnitudes of the corresponding
        frequencies.

    :func:`angle_spectrum`
        :func:`angle_spectrum` returns the wrapped version of this function.

    :func:`specgram`
        :func:`specgram` can return the phase spectrum of segments within the
        signal.
    """
    # Thin wrapper: all real work happens in the shared helper.
    spec, freqs = _single_spectrum_helper(x=x, Fs=Fs, window=window,
                                          pad_to=pad_to, sides=sides,
                                          mode='phase')
    return spec, freqs
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
             noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
             mode=None):
    """
    Compute a spectrogram.

    Compute and plot a spectrogram of data in x.  Data are split into
    NFFT length segments and the spectrum of each section is
    computed.  The windowing function window is applied to each
    segment, and the amount of overlap of each segment is
    specified with noverlap.

    Parameters
    ----------
    x : array_like
        1-D array or sequence.

    %(Spectral)s

    %(PSD)s

    noverlap : int, optional
        The number of points of overlap between blocks.  The default
        value is 128.
    mode : str, optional
        What sort of spectrum to use, default is 'psd'.
            'psd'
                Returns the power spectral density.
            'complex'
                Returns the complex-valued frequency spectrum.
            'magnitude'
                Returns the magnitude spectrum.
            'angle'
                Returns the phase spectrum without unwrapping.
            'phase'
                Returns the phase spectrum with unwrapping.

    Returns
    -------
    spectrum : array_like
        2-D array, columns are the periodograms of successive segments.

    freqs : array_like
        1-D array, frequencies corresponding to the rows in *spectrum*.

    t : array_like
        1-D array, the times corresponding to midpoints of segments
        (i.e the columns in *spectrum*).

    See Also
    --------
    psd : differs in the overlap and in the return values.
    complex_spectrum : similar, but with complex valued frequencies.
    magnitude_spectrum : similar single segment when mode is 'magnitude'.
    angle_spectrum : similar to single segment when mode is 'angle'.
    phase_spectrum : similar to single segment when mode is 'phase'.

    Notes
    -----
    detrend and scale_by_freq only apply when *mode* is set to 'psd'.
    """
    if NFFT is None:
        NFFT = 256  # same default as in _spectral_helper()
    if noverlap is None:
        noverlap = 128  # default in _spectral_helper() is noverlap = 0
    # With NFFT at least as long as the signal there is only one segment,
    # which defeats the point of a spectrogram -- warn about it.
    if len(x) <= NFFT:
        warnings.warn("Only one segment is calculated since parameter NFFT " +
                      "(=%d) >= signal length (=%d)." % (NFFT, len(x)))
    spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
                                      detrend_func=detrend, window=window,
                                      noverlap=noverlap, pad_to=pad_to,
                                      sides=sides,
                                      scale_by_freq=scale_by_freq,
                                      mode=mode)
    if mode != 'complex':
        # The generic helper may carry a residual imaginary part; every
        # mode except 'complex' is real-valued by definition.
        spec = spec.real
    return spec, freqs, t
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
           noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The coherence between *x* and *y*.  Coherence is the normalized
    cross spectral density:

    .. math::

        C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}

    Parameters
    ----------
    x, y
        Array or sequence containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : integer
        The number of points of overlap between blocks.  The default value
        is 0 (no overlap).

    Returns
    -------
    The return value is the tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector. For cohere, scaling the
    individual densities by the sampling frequency has no effect,
    since the factors cancel out.

    Raises
    ------
    ValueError
        If len(*x*) < 2 * *NFFT*, i.e. the signal is too short to average
        over segments.

    See Also
    --------
    :func:`psd`, :func:`csd` :
        For information about the methods used to compute :math:`P_{xy}`,
        :math:`P_{xx}` and :math:`P_{yy}`.
    """
    if len(x) < 2 * NFFT:
        raise ValueError(_coh_error)
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
                 scale_by_freq)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
                 scale_by_freq)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
                 scale_by_freq)
    # BUG FIX: normalize by the product of the two *auto*-spectral
    # densities, Pxx * Pyy, per the definition above.  The original code
    # divided by (Pxx * Pxy), using the cross spectrum in the denominator
    # and leaving the computed Pyy unused.
    Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
    return Cxy, f
def donothing_callback(*args):
    """Default progress callback: accept any arguments and do nothing."""
    return None
def cohere_pairs(X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                 window=window_hanning, noverlap=0,
                 preferSpeedOverMemory=True,
                 progressCallback=donothing_callback,
                 returnPxx=False):
    """
    Compute the coherence and phase for all pairs *ij*, in *X*.

    *X* is a *numSamples* * *numCols* array

    *ij* is a list of tuples.  Each tuple is a pair of indexes into
    the columns of X for which you want to compute coherence.  For
    example, if *X* has 64 columns, and you want to compute all
    nonredundant pairs, define *ij* as::

      ij = []
      for i in range(64):
          for j in range(i+1,64):
              ij.append( (i,j) )

    *preferSpeedOverMemory* is an optional bool. Defaults to true. If
    False, limits the caching by only making one, rather than two,
    complex cache arrays. This is useful if memory becomes critical.
    Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performace gains over calling
    :func:`cohere` for each pair, and will use subtantially less
    memory than if *preferSpeedOverMemory* is True.  In my tests with
    a 43000,64 array over all nonredundant pairs,
    *preferSpeedOverMemory* = True delivered a 33% performance boost
    on a 1.7GHZ Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = False.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
    :func:`cohere`.

    Returns
    -------
    Cxy : dictionary of (*i*, *j*) tuples -> coherence vector for
        that pair.  i.e., ``Cxy[(i,j) = cohere(X[:,i], X[:,j])``.
        Number of dictionary keys is ``len(ij)``.

    Phase : dictionary of phases of the cross spectral density at
        each frequency for each pair.  Keys are (*i*, *j*).

    freqs : vector of frequencies, equal in length to either the
        coherence or phase vectors for any (*i*, *j*) key.

    e.g., to make a coherence Bode plot::

          subplot(211)
          plot( freqs, Cxy[(12,19)])
          subplot(212)
          plot( freqs, Phase[(12,19)])

    For a large number of pairs, :func:`cohere_pairs` can be much more
    efficient than just calling :func:`cohere` for each pair, because
    it caches most of the intensive computations.  If :math:`N` is the
    number of pairs, this function is :math:`O(N)` for most of the
    heavy lifting, whereas calling cohere for each pair is
    :math:`O(N^2)`.  However, because of the caching, it is also more
    memory intensive, making 2 additional complex arrays with
    approximately the same number of elements as *X*.

    See :file:`test/cohere_pairs_test.py` in the src tree for an
    example script that shows that this :func:`cohere_pairs` and
    :func:`cohere` give the same results for a given pair.

    See Also
    --------
    :func:`psd`
        For information about the methods used to compute :math:`P_{xy}`,
        :math:`P_{xx}` and :math:`P_{yy}`.
    """
    numRows, numCols = X.shape

    # zero pad if X is too short
    if numRows < NFFT:
        tmp = X
        X = np.zeros((NFFT, numCols), X.dtype)
        X[:numRows, :] = tmp
        del tmp
    numRows, numCols = X.shape

    # get all the columns of X that we are interested in by checking
    # the ij tuples
    allColumns = set()
    for i, j in ij:
        allColumns.add(i)
        allColumns.add(j)
    Ncols = len(allColumns)

    # for real X, ignore the negative frequencies
    if np.iscomplexobj(X):
        numFreqs = NFFT
    else:
        numFreqs = NFFT//2+1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    if cbook.iterable(window):
        if len(window) != NFFT:
            raise ValueError("The length of the window must be equal to NFFT")
        windowVals = window
    else:
        windowVals = window(np.ones(NFFT, X.dtype))
    ind = list(xrange(0, numRows-NFFT+1, NFFT-noverlap))
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    normVal = np.linalg.norm(windowVals)**2
    # BUG FIX: the original reported progress as progressCallback(i/Ncols,
    # ...), reusing the stale loop variable *i* left over from the ij loop
    # above; use the position of iCol within this loop instead.
    for iColNum, iCol in enumerate(allColumns):
        progressCallback(iColNum/Ncols, 'Cacheing FFTs')
        Slices = np.zeros((numSlices, numFreqs), dtype=np.complex_)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice, :] = np.fft.fft(thisSlice)[:numFreqs]

        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conj(Slices)
        Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i, j in ij:
        count += 1
        if count % 10 == 0:
            progressCallback(count/N, 'Computing coherences')

        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * np.conj(FFTSlices[j])
        if numSlices > 1:
            Pxy = np.mean(Pxy, axis=0)
        # Pxy = np.divide(Pxy, normVal)
        Pxy /= normVal
        # Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
        Cxy[i, j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
        Phase[i, j] = np.arctan2(Pxy.imag, Pxy.real)

    freqs = Fs/NFFT*np.arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
def entropy(y, bins):
    r"""
    Return the entropy of the data in *y* in units of nat.

    .. math::

      -\sum p_i \ln(p_i)

    where :math:`p_i` is the probability of observing *y* in the
    :math:`i^{th}` bin of *bins*.  *bins* can be a number of bins or a
    range of bins; see :func:`numpy.histogram`.

    Compare *S* with analytic calculation for a Gaussian::

      x = mu + sigma * randn(200000)
      Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
    """
    counts, edges = np.histogram(y, bins)
    counts = counts.astype(float)
    # Keep only occupied bins: 0 * log(0) is taken to be 0, and log(0)
    # would otherwise produce -inf/NaN.
    counts = np.take(counts, np.nonzero(counts)[0])
    probs = np.divide(counts, len(y))
    bin_width = edges[1] - edges[0]
    # Differential-entropy correction: add log of the bin width.
    return -1.0 * np.sum(probs * np.log(probs)) + np.log(bin_width)
def normpdf(x, *args):
    "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
    mu, sigma = args
    # standard score of x under N(mu, sigma**2)
    z = 1. / sigma * (x - mu)
    return np.exp(-0.5 * z ** 2) / (np.sqrt(2 * np.pi) * sigma)
def find(condition):
    "Return the indices where ravel(condition) is true"
    # flatnonzero ravels its argument before locating nonzero entries
    return np.flatnonzero(np.ravel(condition))
def longest_contiguous_ones(x):
    """
    Return the indices of the longest stretch of contiguous ones in *x*,
    assuming *x* is a vector of zeros and ones.  If there are two
    equally long stretches, pick the first.
    """
    x = np.ravel(x)
    if len(x) == 0:
        return np.array([])

    zero_positions = (x == 0).nonzero()[0]
    if len(zero_positions) == 0:
        # no zeros at all: the whole vector is one run of ones
        return np.arange(len(x))
    if len(zero_positions) == len(x):
        # no ones at all
        return np.array([])

    # Pad a zero on each side so every run of ones yields exactly one
    # rising (+1) and one falling (-1) edge in the first difference.
    padded = np.zeros((len(x) + 2,), x.dtype)
    padded[1:-1] = x
    edges = np.diff(padded)
    run_starts = (edges == 1).nonzero()[0]
    run_ends = (edges == -1).nonzero()[0]
    # first index attaining the maximum run length wins ties
    best = (run_ends - run_starts == max(run_ends - run_starts)).nonzero()[0][0]
    return np.arange(run_starts[best], run_ends[best])
def longest_ones(x):
    """Alias for :func:`longest_contiguous_ones`."""
    return longest_contiguous_ones(x)
class PCA(object):
    def __init__(self, a, standardize=True):
        """
        Compute the SVD of *a* and store the results for PCA.  Use
        :meth:`project` to map data onto a reduced set of dimensions.

        Parameters
        ----------
        a : np.ndarray
            A numobservations x numdims array
        standardize : bool
            True if input data are to be standardized. If False, only
            centering will be carried out.

        Attributes
        ----------
        a
            A centered unit sigma version of input ``a``.
        numrows, numcols
            The dimensions of ``a``.
        mu
            A numdims array of means of ``a``. This is the vector that
            points to the origin of PCA space.
        sigma
            A numdims array of standard deviation of ``a``.
        fracs
            The proportion of variance of each of the principal components.
        s
            The actual eigenvalues of the decomposition.
        Wt
            The weight vector for projecting a numdims point or array into
            PCA space.
        Y
            A projected into PCA space.

        Notes
        -----
        The factor loadings are in the ``Wt`` factor, i.e., the factor
        loadings for the first principal component are given by ``Wt[0]``.
        This row is also the first eigenvector.
        """
        numrows, numcols = a.shape
        if numrows < numcols:
            raise RuntimeError('we assume data in a is organized with '
                               'numrows>numcols')

        self.numrows, self.numcols = numrows, numcols
        self.mu = a.mean(axis=0)
        self.sigma = a.std(axis=0)
        self.standardize = standardize

        # Work with the centered (and possibly standardized) data below.
        a = self.center(a)
        self.a = a

        U, s, Vh = np.linalg.svd(a, full_matrices=False)

        # The SVD is commonly written as a = U s V.H; the rows of Vh are
        # the eigenvectors of a.H a and for row i the corresponding
        # eigenvalue is s[i]**2.  Vh therefore gives the projection
        # weights into PCA space.
        self.Wt = Vh

        # coordinates of the data in PCA space
        self.Y = np.dot(Vh, a.T).T

        # eigenvalues of the decomposition
        self.s = s**2

        # fraction of total variance carried by each component
        per_component = self.s / float(len(s))
        self.fracs = per_component / per_component.sum()

    def project(self, x, minfrac=0.):
        """
        Project *x* onto the principal axes, dropping any axes whose
        fraction of variance is below *minfrac*.
        """
        x = np.asarray(x)
        if x.shape[-1] != self.numcols:
            raise ValueError('Expected an array with dims[-1]==%d' %
                             self.numcols)
        projected = np.dot(self.Wt, self.center(x).T).T
        keep = self.fracs >= minfrac
        return projected[:, keep] if x.ndim == 2 else projected[keep]

    def center(self, x):
        """
        Center *x* using the training mean ``mu`` and, when
        ``standardize`` is on, scale by the training ``sigma``.
        """
        centered = x - self.mu
        return centered / self.sigma if self.standardize else centered

    @staticmethod
    def _get_colinear():
        # Two independent columns plus two exact linear combinations of
        # them, handy for exercising PCA on degenerate (collinear) data.
        c0 = np.array([
            0.19294738, 0.6202667, 0.45962655, 0.07608613, 0.135818,
            0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
            0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
            0.837437, 0.10824303, 0.1723387, 0.43926494, 0.83705486])

        c1 = np.array([
            -1.17705601, -0.513883, -0.26614584, 0.88067144, 1.00474954,
            -1.1616545, 0.0266109, 0.38227157, 1.80489433, 0.21472396,
            -1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
            -0.4685737, 1.23980423, -0.14638744, -0.35907697, 0.22442616])

        c2 = c0 + 2*c1
        c3 = -3*c0 + 4*c1
        return np.array([c3, c0, c1, c2]).T
def prctile(x, p=(0.0, 25.0, 50.0, 75.0, 100.0)):
    """
    Return the percentiles of *x*.  *p* can either be a sequence of
    percentile values or a scalar. If *p* is a sequence, the ith
    element of the return sequence is the *p*(i)-th percentile of *x*.
    If *p* is a scalar, the largest value of *x* less than or equal to
    the *p* percentage point in the sequence is returned.
    """
    # This implementation derived from scipy.stats.scoreatpercentile
    def _lerp(lo, hi, t):
        """Point a fraction *t* of the way from *lo* to *hi*."""
        return lo + (hi - lo) * t

    per = np.array(p)
    sorted_vals = np.sort(x, axis=None)
    # fractional positions of the requested percentiles in sorted order
    positions = per / 100 * (sorted_vals.shape[0] - 1)
    lo_idx = positions.astype(int)
    hi_idx = lo_idx + 1
    frac = positions % 1

    # Interpolating past the last element would index out of bounds;
    # shift such requests down one slot and carry the fraction over.
    past_end = hi_idx >= len(sorted_vals)
    if per.ndim:
        lo_idx[past_end] -= 1
        hi_idx[past_end] -= 1
        frac[past_end] += 1
    elif past_end:
        lo_idx -= 1
        hi_idx -= 1
        frac += 1

    return _lerp(sorted_vals[lo_idx], sorted_vals[hi_idx], frac)
def prctile_rank(x, p):
    """
    Return the rank for each element in *x*, return the rank
    0..len(*p*).  e.g., if *p* = (25, 50, 75), the return value will be a
    len(*x*) array with values in [0,1,2,3] where 0 indicates the
    value is less than the 25th percentile, 1 indicates the value is
    >= the 25th and < 50th percentile, ... and 3 indicates the value
    is above the 75th percentile cutoff.

    *p* is either an array of percentiles in [0..100] or a scalar which
    indicates how many quantiles of data you want ranked.
    """
    if cbook.iterable(p):
        p = np.asarray(p)
        # Reject values that look like fractions (0..1) instead of percents.
        if p.max() <= 1 or p.min() < 0 or p.max() > 100:
            raise ValueError('percentiles should be in range 0..100, not 0..1')
    else:
        # a scalar requests that many evenly spaced quantile cutoffs
        p = np.arange(100.0/p, 100.0, 100.0/p)
    cutoffs = prctile(x, p)
    return np.searchsorted(cutoffs, x)
def center_matrix(M, dim=0):
    """
    Return the matrix *M* with each row having zero mean and unit std.

    If *dim* = 1 operate on columns instead of rows.  (*dim* is
    opposite to the numpy axis kwarg.)
    """
    M = np.asarray(M, float)
    if dim:
        # normalize each column
        return (M - M.mean(axis=0)) / M.std(axis=0)
    # normalize each row
    centered = M - M.mean(axis=1)[:, np.newaxis]
    return centered / centered.std(axis=1)[:, np.newaxis]
def rk4(derivs, y0, t):
    """
    Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.

    This is a toy implementation which may be useful if you find
    yourself stranded on a system w/o scipy. Otherwise use
    :func:`scipy.integrate`.

    Parameters
    ----------
    derivs
        returns the derivative of the system and has the
        signature ``dy = derivs(yi, ti)``
    y0
        initial state vector (or scalar)
    t
        sample times

    Returns
    -------
    yout
        array of states, one row per sample time; 1D when *y0* is a
        scalar, 2D of shape ``(len(t), len(y0))`` otherwise.

    Examples
    --------
    A 2D system::
        def derivs6(x,t):
            d1 = x[0] + 2*x[1]
            d2 = -3*x[0] + 4*x[1]
            return (d1, d2)
        dt = 0.0005
        t = arange(0.0, 2.0, dt)
        y0 = (1,2)
        yout = rk4(derivs6, y0, t)
    A 1D system::
        alpha = 2
        def derivs(x,t):
            return -alpha*x + exp(-t)
        y0 = 1
        yout = rk4(derivs, y0, t)
    """
    # a scalar y0 has no len(); allocate a 1D output in that case
    try:
        Ny = len(y0)
    except TypeError:
        yout = np.zeros((len(t),), float)
    else:
        yout = np.zeros((len(t), Ny), float)
    yout[0] = y0
    for i in range(len(t) - 1):
        ti = t[i]
        dt = t[i + 1] - ti
        half = dt / 2.0
        yi = yout[i]
        # the four classical RK slopes
        k1 = np.asarray(derivs(yi, ti))
        k2 = np.asarray(derivs(yi + half * k1, ti + half))
        k3 = np.asarray(derivs(yi + half * k2, ti + half))
        k4 = np.asarray(derivs(yi + dt * k3, ti + dt))
        yout[i + 1] = yi + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
                     mux=0.0, muy=0.0, sigmaxy=0.0):
    """
    Bivariate Gaussian distribution for equal shape *X*, *Y*.
    See `bivariate normal
    <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
    at mathworld.
    """
    dx = X - mux
    dy = Y - muy
    # correlation coefficient derived from the covariance sigmaxy
    rho = sigmaxy / (sigmax * sigmay)
    one_minus_rho2 = 1 - rho ** 2
    z = (dx ** 2 / sigmax ** 2 + dy ** 2 / sigmay ** 2
         - 2 * rho * dx * dy / (sigmax * sigmay))
    denom = 2 * np.pi * sigmax * sigmay * np.sqrt(one_minus_rho2)
    return np.exp(-z / (2 * one_minus_rho2)) / denom
def get_xyz_where(Z, Cond):
    """
    *Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
    a boolean matrix where some condition is satisfied. Return value
    is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
    *z* are the values of *Z* at those indices. *x*, *y*, and *z* are
    1D arrays.
    """
    rows, cols = np.indices(Z.shape)
    # boolean indexing flattens each array to the selected elements
    return rows[Cond], cols[Cond], Z[Cond]
def get_sparse_matrix(M, N, frac=0.1):
    """
    Return a *M* x *N* array with approximately *frac* of its elements
    randomly filled with values drawn uniformly from [0, 1).

    Parameters
    ----------
    M, N : int
        Number of rows and columns.
    frac : float
        Target fill fraction; the achieved fraction can be lower
        because randomly chosen cells may repeat.
    """
    data = np.zeros((M, N))
    for _ in range(int(M * N * frac)):
        # np.random.randint's upper bound is exclusive, so pass M and N
        # (not M-1/N-1, which could never fill the last row or column).
        x = np.random.randint(0, M)
        y = np.random.randint(0, N)
        data[x, y] = np.random.rand()
    return data
def dist(x, y):
    """
    Return the distance between two points.
    """
    diff = np.asarray(x) - np.asarray(y)
    # Euclidean norm of the difference vector
    return np.sqrt(np.sum(diff * diff))
def dist_point_to_segment(p, s0, s1):
    """
    Get the distance of a point to a segment.
    *p*, *s0*, *s1* are *xy* sequences
    This algorithm from
    http://geomalgorithms.com/a02-_lines.html
    """
    def _norm(vec):
        # Euclidean length (same computation as the module-level dist())
        return np.sqrt(np.dot(vec, vec))

    p = np.asarray(p, float)
    s0 = np.asarray(s0, float)
    s1 = np.asarray(s1, float)
    seg = s1 - s0
    to_p = p - s0
    proj = np.dot(to_p, seg)
    if proj <= 0:
        # projection falls before s0: nearest point is the s0 end
        return _norm(p - s0)
    seg_len2 = np.dot(seg, seg)
    if seg_len2 <= proj:
        # projection falls past s1: nearest point is the s1 end
        return _norm(p - s1)
    # projection falls inside the segment
    nearest = s0 + (proj / seg_len2) * seg
    return _norm(p - nearest)
def segments_intersect(s1, s2):
    """
    Return *True* if *s1* and *s2* intersect.
    *s1* and *s2* are defined as::
        s1: (x1, y1), (x2, y2)
        s2: (x3, y3), (x4, y4)
    """
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2
    denom = ((y4 - y3) * (x2 - x1)) - ((x4 - x3) * (y2 - y1))
    if denom == 0:
        # lines parallel (collinear overlap also reports False)
        return False
    num1 = ((x4 - x3) * (y1 - y3)) - ((y4 - y3) * (x1 - x3))
    num2 = ((x2 - x1) * (y1 - y3)) - ((y2 - y1) * (x1 - x3))
    u1 = num1 / denom
    u2 = num2 / denom
    # both line parameters must lie within [0, 1] for the crossing
    # point to fall inside both segments
    return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
    """
    Compute an FFT phase randomized surrogate of *x*.
    """
    # *window* may be an array of weights or a window function
    if cbook.iterable(window):
        windowed = window * detrend(x)
    else:
        windowed = window(detrend(x))
    spectrum = np.fft.fft(windowed)
    # randomize the phase of each bin while keeping its magnitude
    phases = 2. * np.pi * 1j * np.random.rand(len(x))
    return np.fft.ifft(spectrum * np.exp(phases)).real
def movavg(x, n):
    """
    Compute the len(*n*) moving average of *x*.
    """
    # convolve with a length-n boxcar of weight 1/n; 'valid' keeps only
    # the windows that lie fully inside *x*
    weights = np.full((n,), 1.0 / n)
    return np.convolve(x, weights, mode='valid')
# the following code was written and submitted by Fernando Perez
# from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# *****************************************************************************
# Globals
# ****************************************************************************
# function definitions
# Clipping bounds for exp_safe: the log of the smallest positive normal
# IEEE-754 double (so exp() underflows smoothly to zero) and the largest
# finite double.
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
    """
    Compute exponentials which safely underflow to zero.

    Slow, but convenient to use. Note that numpy provides proper
    floating point exception handling with access to the underlying
    hardware.

    Parameters
    ----------
    x : ndarray or scalar
        Exponent(s). Arrays are clipped to the representable range
        before exponentiation; scalars go straight to ``math.exp``.
    """
    # isinstance (rather than an exact ``type(x) is`` check) also accepts
    # ndarray subclasses such as np.matrix and masked arrays.
    if isinstance(x, np.ndarray):
        return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
    else:
        return math.exp(x)
def amap(fn, *args):
    """
    amap(function, sequence[, sequence, ...]) -> array.
    Works like :func:`map`, but it returns an array. This is just a
    convenient shorthand for ``numpy.array(map(...))``.
    """
    # zip stops at the shortest input, matching map()'s behavior here
    return np.array([fn(*vals) for vals in zip(*args)])
def rms_flat(a):
    """
    Return the root mean square of all the elements of *a*, flattened out.
    """
    squared = np.abs(a) ** 2
    return np.sqrt(squared.mean())
def l1norm(a):
    """
    Return the *l1* norm of *a*, flattened out.
    Implemented as a separate function (not a call to :func:`norm` for speed).
    """
    return np.abs(a).sum()
def l2norm(a):
    """
    Return the *l2* norm of *a*, flattened out.
    Implemented as a separate function (not a call to :func:`norm` for speed).
    """
    squared = np.abs(a) ** 2
    return np.sqrt(squared.sum())
def norm_flat(a, p=2):
    """
    norm(a,p=2) -> l-p norm of a.flat
    Return the l-p norm of *a*, considered as a flat array. This is NOT a true
    matrix norm, since arrays of arbitrary rank are always flattened.
    *p* can be a number or the string 'Infinity' to get the L-infinity norm.
    """
    # This function was being masked by a more general norm later in
    # the file. We may want to simply delete it.
    mags = np.abs(a)
    if p == 'Infinity':
        return np.max(mags)
    return np.sum(mags ** p) ** (1 / p)
def frange(xini, xfin=None, delta=None, **kw):
    """
    frange([start,] stop[, step, keywords]) -> array of floats

    Return a numpy ndarray containing a progression of floats. Similar to
    :func:`numpy.arange`, but defaults to a closed interval: the endpoint
    *is included*. ``frange(x0, x1)`` returns ``[x0, x0+1, ..., x1]`` and
    *start* defaults to 0. Pass the keyword *closed* = 0 to get the usual
    half-open :func:`range`/:func:`numpy.arange` behavior.

    When *step* is given, it specifies the increment (or decrement); all
    arguments can be floating point numbers. ``frange(x0,x1,d)`` returns
    ``[x0,x0+d,x0+2d,...,xfin]`` where *xfin* <= *x1*.

    The keyword *npts* instead sets the number of points the list should
    contain (and overrides the value *step* might have been given);
    :func:`numpy.arange` doesn't offer this option.

    Examples::
        >>> frange(3)
        array([ 0.,  1.,  2.,  3.])
        >>> frange(3,closed=0)
        array([ 0.,  1.,  2.])
        >>> frange(1,6,2)
        array([1, 3, 5]) or 1,3,5,7, depending on floating point vagueries
        >>> frange(1,6.5,npts=5)
        array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    # whether the endpoint is included (closed interval)
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0
    # allow the *first* argument to be optional, range()-style
    if xfin is None:
        xini, xfin = 0.0, xini + 0.0
    if delta is None:
        delta = 1.0
    if 'npts' in kw:
        # an explicit point count overrides *delta*
        npts = kw['npts']
        delta = (xfin - xini) / float(npts - endpoint)
    else:
        # round finds the nearest, so the endpoint can be up to
        # delta/2 larger than xfin.
        npts = int(np.round((xfin - xini) / delta)) + endpoint
    return np.arange(npts) * delta + xini
def identity(n, rank=2, dtype='l', typecode=None):
    """
    Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
    For ranks higher than 2, this object is simply a multi-index Kronecker
    delta::
                        /  1  if i0=i1=...=iR,
        id[i0,i1,...,iR] = -|
                        \\ 0  otherwise.
    Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
    Since rank defaults to 2, this function behaves in the default case (when
    only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
    much faster.
    """
    if typecode is not None:
        # legacy Numeric-style alias for *dtype*
        dtype = typecode
    out = np.zeros((n,) * rank, dtype)
    # set every generalized-diagonal entry id[i, i, ..., i] to one
    for i in range(n):
        out[(i,) * rank] = 1
    return out
def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*.

    Parameters
    ----------
    number : non-negative number
        Value to convert.
    base : int
        Radix (at most 36, given the available digit characters).
    padding : int
        Minimum number of digits; shorter results are left-padded
        with '0'.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number) / math.log(base))
    # Python 3 ints are arbitrary precision; the historical ``long(base)``
    # call here raised NameError on Python 3.
    max_power = base ** max_exponent
    lead_digit = int(number / max_power)
    return (chars[lead_digit] +
            base_repr(number - max_power * lead_digit, base,
                      max(padding - 1, max_exponent)))
def binary_repr(number, max_length=1025):
    """
    Return the binary representation of the input *number* as a
    string.
    This is more efficient than using :func:`base_repr` with base 2.
    Increase the value of max_length for very large numbers. Note that
    on 32-bit machines, 2**1023 is the largest integer power of 2
    which can be converted to a Python float.
    """
    # assert number < 2L << max_length
    # Build the digits MSB-first: right-shift *number* by
    # max_length-1 .. 0 and take each result mod 2.
    shifts = map(operator.rshift, max_length * [number],
                 range(max_length - 1, -1, -1))
    digits = list(map(operator.mod, shifts, max_length * [2]))
    if not digits.count(1):
        # NOTE(review): returns the *int* 0 rather than the string '0',
        # contradicting the docstring; log2/ispower2 rely on the TypeError
        # this causes when sliced -- confirm before changing.
        return 0
    # drop leading zeros
    digits = digits[digits.index(1):]
    # repr() of each bit; the 'L' strip is a Python 2 long-literal relic
    return ''.join(map(repr, digits)).replace('L', '')
def log2(x, ln2=math.log(2.0)):
    """
    Return the log(*x*) in base 2.
    This is a _slow_ function but which is guaranteed to return the correct
    integer value if the input is an integer exact power of 2.
    """
    try:
        # binary_repr raises TypeError for inputs it cannot represent
        # (e.g. it returns the int 0 for x == 0, which fails the slice),
        # in which case we fall back to floating-point log.
        bin_n = binary_repr(x)[1:]
    except (AssertionError, TypeError):
        return math.log(x)/ln2
    else:
        if '1' in bin_n:
            # not an exact power of two: use floating-point log
            return math.log(x)/ln2
        else:
            # x == 2**k exactly: bit string is '1' followed by k zeros,
            # so the stripped tail length is the exact integer exponent
            return len(bin_n)
def ispower2(n):
    """
    Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
    Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
    """
    # A power of two is '1' followed only by zeros in binary; strip the
    # leading '1' and check whether any other set bit remains.
    bin_n = binary_repr(n)[1:]
    if '1' in bin_n:
        return 0
    else:
        # number of trailing zeros == the exponent
        return len(bin_n)
def isvector(X):
    """
    Like the MATLAB function with the same name, returns *True*
    if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has a one non-singleton axis (i.e., it can have
    multiple axes, but all must have length 1, except for one of
    them).
    If you just want to see if the array has 1 axis, use X.ndim == 1.
    """
    # the total element count equals the longest axis exactly when
    # every other axis has length one
    total = np.prod(X.shape)
    return total == np.max(X.shape)
# end fperez numutils code
# helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
    ':func:`numpy.isnan` for arbitrary types'
    # strings would raise inside np.isnan; treat them as "not NaN"
    if isinstance(x, six.string_types):
        return False
    try:
        return np.isnan(x)
    except (NotImplementedError, TypeError):
        # not a numeric type numpy can test
        return False
def safe_isinf(x):
    ':func:`numpy.isinf` for arbitrary types'
    # strings would raise inside np.isinf; treat them as "not inf"
    if isinstance(x, six.string_types):
        return False
    try:
        return np.isinf(x)
    except (NotImplementedError, TypeError):
        # not a numeric type numpy can test
        return False
def rec_append_fields(rec, names, arrs, dtypes=None):
    """
    Return a new record array with field names populated with data
    from arrays in *arrs*. If appending a single field, then *names*,
    *arrs* and *dtypes* do not have to be lists. They can just be the
    values themselves.
    """
    # Fixed: this test previously read ``isinstance(names[0]), six.string_types``
    # (misplaced parenthesis), which made the whole condition a non-empty
    # tuple -- always true -- and called isinstance() with one argument.
    if (not isinstance(names, six.string_types) and cbook.iterable(names)
            and len(names) and isinstance(names[0], six.string_types)):
        if len(names) != len(arrs):
            raise ValueError("number of arrays do not match number of names")
    else:  # we have only 1 name and 1 array
        names = [names]
        arrs = [arrs]
    arrs = list(map(np.asarray, arrs))
    if dtypes is None:
        # infer a dtype per appended array
        dtypes = [a.dtype for a in arrs]
    elif not cbook.iterable(dtypes):
        dtypes = [dtypes]
    if len(arrs) != len(dtypes):
        if len(dtypes) == 1:
            # broadcast a single dtype across all arrays
            dtypes = dtypes * len(arrs)
        else:
            raise ValueError("dtypes must be None, a single dtype or a list")
    old_dtypes = rec.dtype.descr
    if six.PY2:
        # Python 2 dtype field names must be byte strings
        old_dtypes = [(name.encode('utf-8'), dt) for name, dt in old_dtypes]
    newdtype = np.dtype(old_dtypes + list(zip(names, dtypes)))
    newrec = np.recarray(rec.shape, dtype=newdtype)
    # copy existing columns, then the appended ones
    for field in rec.dtype.fields:
        newrec[field] = rec[field]
    for name, arr in zip(names, arrs):
        newrec[name] = arr
    return newrec
def rec_drop_fields(rec, names):
    """
    Return a new numpy record array with fields in *names* dropped.
    """
    drop = set(names)
    # keep the remaining fields in their original order
    kept = [(n, rec.dtype[n]) for n in rec.dtype.names if n not in drop]
    newrec = np.recarray(rec.shape, dtype=np.dtype(kept))
    for n, _ in kept:
        newrec[n] = rec[n]
    return newrec
def rec_keep_fields(rec, names):
    """
    Return a new numpy record array with only fields listed in names
    """
    # accept a comma-separated string as well as a sequence of names
    if isinstance(names, six.string_types):
        names = names.split(',')
    columns = [rec[name] for name in names]
    return np.rec.fromarrays(columns, names=names)
def rec_groupby(r, groupby, stats):
    """
    *r* is a numpy record array
    *groupby* is a sequence of record array attribute names that
    together form the grouping key. e.g., ('date', 'productcode')
    *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
    will call ``x = func(attr)`` and assign *x* to the record array
    output with attribute *outname*. For example::
        stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    Return record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
    for each outname name in the *stats* argument, with the associated
    stat summary output.
    """
    # map each groupby key tuple -> list of row indices with that key
    indices_by_key = {}
    for i, row in enumerate(r):
        key = tuple(row[attr] for attr in groupby)
        indices_by_key.setdefault(key, []).append(i)
    # emit one output row per key, in sorted key order
    output_rows = []
    for key in sorted(indices_by_key):
        group = r[indices_by_key[key]]
        stat_values = [func(group[attr]) for attr, func, outname in stats]
        output_rows.append(list(key) + stat_values)
    # column names: the groupby attributes followed by the stat outnames
    outnames = [outname for attr, func, outname in stats]
    names = list(groupby) + outnames
    return np.rec.fromrecords(output_rows, names=names)
def rec_summarize(r, summaryfuncs):
    """
    *r* is a numpy record array
    *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
    output to a new attribute name *outname*. The returned record
    array is identical to *r*, with extra arrays for each element in
    *summaryfuncs*.
    """
    # start from the existing columns, then append one per summary func
    names = list(r.dtype.names)
    columns = [r[name] for name in names]
    for attr, func, outname in summaryfuncs:
        names.append(outname)
        columns.append(np.asarray(func(r[attr])))
    return np.rec.fromarrays(columns, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1',
             r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name. If *r1* and *r2* have equal values on all the keys
    in the *key* tuple, then their fields will be merged into a new
    record array containing the intersection of the fields of *r1* and
    *r2*.
    *r1* (also *r2*) must not have any duplicate keys.
    The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
    do a rightouter join just reverse *r1* and *r2*.
    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.
    The keywords *r1postfix* and *r2postfix* are postfixed to column names
    (other than keys) that are both in *r1* and *r2*.
    """
    # normalize a single attribute name to a 1-tuple of names
    if isinstance(key, six.string_types):
        key = (key, )
    # validate that every key field exists in both arrays
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s' % name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s' % name)
    def makekey(row):
        # tuple of this row's key-field values; used as a dict key
        return tuple([row[name] for name in key])
    # key tuple -> row index, for each array (assumes no duplicate keys)
    r1d = {makekey(row): i for i, row in enumerate(r1)}
    r2d = {makekey(row): i for i, row in enumerate(r2)}
    r1keys = set(r1d)
    r2keys = set(r2d)
    # rows matched in both arrays
    common_keys = r1keys & r2keys
    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])
    common_len = len(common_keys)
    left_len = right_len = 0
    if jointype == "outer" or jointype == "leftouter":
        # rows only in r1
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)
    if jointype == "outer":
        # rows only in r2
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)
    def key_desc(name):
        '''
        if name is a string key, use the larger size of r1 or r2 before
        merging
        '''
        dt1 = r1.dtype[name]
        if dt1.type != np.string_:
            return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
        if dt1 != dt2:
            msg = "The '{0}' fields in arrays 'r1' and 'r2' must have the same"
            msg += " dtype."
            raise ValueError(msg.format(name))
        if dt1.num > dt2.num:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
    keydesc = [key_desc(name) for name in key]
    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names:
            return name
        else:
            return name + r1postfix
    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names:
            return name
        else:
            return name + r2postfix
    # dtype descriptors for the non-key columns of each input
    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr
              if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr
              if desc[0] not in key]
    all_dtypes = keydesc + r1desc + r2desc
    if six.PY2:
        # Python 2 dtype field names must be byte strings
        all_dtypes = [(name.encode('utf-8'), dt) for name, dt in all_dtypes]
    newdtype = np.dtype(all_dtypes)
    # output layout: [common rows | r1-only rows | r2-only rows]
    newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
    if defaults is not None:
        for thiskey in defaults:
            if thiskey not in newdtype.names:
                warnings.warn('rec_join defaults key="%s" not in new dtype '
                              'names "%s"' % (thiskey, newdtype.names))
    # zero-initialize numeric columns so unmatched slots are defined
    for name in newdtype.names:
        dt = newdtype[name]
        if dt.kind in ('f', 'i'):
            newrec[name] = 0
    if jointype != 'inner' and defaults is not None:
        # fill in the defaults enmasse
        newrec_fields = list(newrec.dtype.fields)
        for k, v in six.iteritems(defaults):
            if k in newrec_fields:
                newrec[k] = v
    # copy r1 data: matched rows first, then (for outer joins) r1-only rows
    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len+left_len)] = (
                r1[field][left_ind]
            )
    # copy r2 data: matched rows (non-key fields), then r2-only rows
    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]
    newrec.sort(order=key)
    return newrec
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
    """
    Join a sequence of record arrays on single column key.
    This function only joins a single column of the multiple record arrays
    *key*
      is the column name that acts as a key
    *name*
      is the name of the column that we want to join
    *recs*
      is a list of record arrays to join
    *jointype*
      is a string 'inner' or 'outer'
    *missing*
      is what any missing field is replaced by
    *postfixes*
      if not None, a len recs sequence of postfixes
    returns a record array with columns [rowkey, name0, name1, ... namen-1].
    or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
    [rowkey, namePF0, namePF1, ... namePFN-1].
    Example::
      r = recs_join("date", "close", recs=[r0, r1], missing=0.)
    """
    results = []
    # Walk all arrays in lockstep, aligned on the key column; each yielded
    # *row* has one entry per input array, None where that array has no
    # record for the key (see cbook.align_iterators).
    aligned_iters = cbook.align_iterators(operator.attrgetter(key),
                                          *[iter(r) for r in recs])
    def extract(r):
        # joined-column value, or *missing* where no record exists
        if r is None:
            return missing
        else:
            return r[name]
    if jointype == "outer":
        # keep every key; missing entries are filled with *missing*
        for rowkey, row in aligned_iters:
            results.append([rowkey] + list(map(extract, row)))
    elif jointype == "inner":
        for rowkey, row in aligned_iters:
            if None not in row:  # throw out any Nones
                results.append([rowkey] + list(map(extract, row)))
    if postfixes is None:
        # default postfixes are the array positions: name0, name1, ...
        postfixes = ['%d' % i for i in range(len(recs))]
    names = ",".join([key] + ["%s%s" % (name, postfix)
                              for postfix in postfixes])
    return np.rec.fromrecords(results, names=names)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
            converterd=None, names=None, missing='', missingd=None,
            use_mrecords=False, dayfirst=False, yearfirst=False):
    """
    Load data from comma/space/tab delimited file in *fname* into a
    numpy record array and return the record array.
    If *names* is *None*, a header row is required to automatically
    assign the recarray names. The headers will be lower cased,
    spaces will be converted to underscores, and illegal attribute
    name characters removed. If *names* is not *None*, it is a
    sequence of names to use for the column names. In this case, it
    is assumed there is no header row.
    - *fname*: can be a filename or a file handle. Support for gzipped
      files is automatic, if the filename ends in '.gz'
    - *comments*: the character used to indicate the start of a comment
      in the file, or *None* to switch off the removal of comments
    - *skiprows*: is the number of rows from the top to skip
    - *checkrows*: is the number of rows to check to validate the column
      data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.
    - *names*: if not None, is a list of header names. In this case, no
      header will be read from the file
    - *missingd* is a dictionary mapping munged column names to field values
      which signify that the field does not contain actual data and should
      be masked, e.g., '0000-00-00' or 'unused'
    - *missing*: a string whose value signals a missing field regardless of
      the column it appears in
    - *use_mrecords*: if True, return an mrecords.fromrecords record array if
      any of the data are missing
    - *dayfirst*: default is False so that MM-DD-YY has precedence over
      DD-MM-YY. See
      http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
      for further information.
    - *yearfirst*: default is False so that MM-DD-YY has precedence over
      YY-MM-DD. See
      http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
      for further information.
    If no rows are found, *None* is returned
    """
    if converterd is None:
        converterd = dict()
    if missingd is None:
        missingd = {}
    import dateutil.parser
    import datetime
    fh = cbook.to_filehandle(fname)
    delimiter = str(delimiter)
    class FH:
        """
        For space-delimited files, we want different behavior than
        comma or tab. Generally, we want multiple spaces to be
        treated as a single separator, whereas with comma and tab we
        want multiple commas to return multiple (empty) fields. The
        join/strip trick below effects this.
        """
        def __init__(self, fh):
            self.fh = fh
        def close(self):
            self.fh.close()
        def seek(self, arg):
            self.fh.seek(arg)
        def fix(self, s):
            # collapse runs of whitespace to single separators
            return ' '.join(s.split())
        def __next__(self):
            return self.fix(next(self.fh))
        def __iter__(self):
            for line in self.fh:
                yield self.fix(line)
    if delimiter == ' ':
        fh = FH(fh)
    reader = csv.reader(fh, delimiter=delimiter)
    def process_skiprows(reader):
        # consume the first *skiprows* rows from the reader
        if skiprows:
            for i, row in enumerate(reader):
                if i >= (skiprows-1):
                    break
        return fh, reader
    process_skiprows(reader)
    def ismissing(name, val):
        "Should the value val in column name be masked?"
        return val == missing or val == missingd.get(name) or val == ''
    def with_default_value(func, default):
        # wrap *func* so missing fields yield *default* instead of raising
        def newfunc(name, val):
            if ismissing(name, val):
                return default
            else:
                return func(val)
        return newfunc
    def mybool(x):
        if x == 'True':
            return True
        elif x == 'False':
            return False
        else:
            raise ValueError('invalid bool')
    dateparser = dateutil.parser.parse
    def mydateparser(x):
        # try and return a datetime object
        d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
        return d
    mydateparser = with_default_value(mydateparser, datetime.datetime(1, 1, 1))
    myfloat = with_default_value(float, np.nan)
    myint = with_default_value(int, -1)
    mystr = with_default_value(str, '')
    mybool = with_default_value(mybool, None)
    def mydate(x):
        # try and return a date object
        d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
        if d.hour > 0 or d.minute > 0 or d.second > 0:
            raise ValueError('not a date')
        return d.date()
    mydate = with_default_value(mydate, datetime.date(1, 1, 1))
    def get_func(name, item, func):
        # promote functions in this order
        # (bool -> int -> float -> date -> datetime -> str); starting at the
        # current *func*, return the first converter that accepts *item*
        funcs = [mybool, myint, myfloat, mydate, mydateparser, mystr]
        for func in funcs[funcs.index(func):]:
            try:
                func(name, item)
            except Exception:
                continue
            return func
        raise ValueError('Could not find a working conversion function')
    # map column names that clash with builtins -- TODO - extend this list
    itemd = {
        'return': 'return_',
        'file': 'file_',
        'print': 'print_',
        }
    def get_converters(reader, comments):
        # scan up to *checkrows* data rows (all rows if zero) and pick a
        # converter per column, promoting types as needed
        converters = None
        i = 0
        for row in reader:
            if (len(row) and comments is not None and
                    row[0].startswith(comments)):
                continue
            if i == 0:
                converters = [mybool]*len(row)
            if checkrows and i > checkrows:
                break
            i += 1
            for j, (name, item) in enumerate(zip(names, row)):
                # explicit converters by column index or name win
                func = converterd.get(j)
                if func is None:
                    func = converterd.get(name)
                if func is None:
                    func = converters[j]
                if len(item.strip()):
                    func = get_func(name, item, func)
                else:
                    # how should we handle custom converters and defaults?
                    func = with_default_value(func, None)
                converters[j] = func
        return converters
    # Get header and remove invalid characters
    needheader = names is None
    if needheader:
        for row in reader:
            if (len(row) and comments is not None and
                    row[0].startswith(comments)):
                continue
            headers = row
            break
        # remove these chars
        delete = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
        delete.add('"')
        names = []
        seen = dict()
        for i, item in enumerate(headers):
            item = item.strip().lower().replace(' ', '_')
            item = ''.join([c for c in item if c not in delete])
            if not len(item):
                item = 'column%d' % i
            item = itemd.get(item, item)
            # de-duplicate repeated header names with a numeric suffix
            cnt = seen.get(item, 0)
            if cnt > 0:
                names.append(item + '_%d' % cnt)
            else:
                names.append(item)
            seen[item] = cnt+1
    else:
        if isinstance(names, six.string_types):
            names = [n.strip() for n in names.split(',')]
    # get the converter functions by inspecting checkrows
    converters = get_converters(reader, comments)
    if converters is None:
        raise ValueError('Could not find any valid data in CSV file')
    # reset the reader and start over
    fh.seek(0)
    reader = csv.reader(fh, delimiter=delimiter)
    process_skiprows(reader)
    if needheader:
        while True:
            # skip past any comments and consume one line of column header
            row = next(reader)
            if (len(row) and comments is not None and
                    row[0].startswith(comments)):
                continue
            break
    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as approriate
    rows = []
    rowmasks = []
    for i, row in enumerate(reader):
        if not len(row):
            continue
        if comments is not None and row[0].startswith(comments):
            continue
        # Ensure that the row returned always has the same nr of elements
        row.extend([''] * (len(converters) - len(row)))
        rows.append([func(name, val)
                     for func, name, val in zip(converters, names, row)])
        rowmasks.append([ismissing(name, val)
                         for name, val in zip(names, row)])
    fh.close()
    if not len(rows):
        return None
    if use_mrecords and np.any(rowmasks):
        # masked record array, masking the missing fields
        r = np.ma.mrecords.fromrecords(rows, names=names, mask=rowmasks)
    else:
        r = np.rec.fromrecords(rows, names=names)
    return r
# a series of classes for describing the format intentions of various rec views
class FormatObj(object):
    # Base class describing how a record-array column is rendered:
    # tostr() gives the text form, toval() the value form, and
    # fromstr() parses the text back into a value.

    def tostr(self, x):
        return self.toval(x)

    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        return s

    def __hash__(self):
        """
        override the hash function of any of the formatters, so that we don't
        create duplicate excel format styles
        """
        # all instances of the same formatter class hash identically
        return hash(self.__class__)
class FormatString(FormatObj):
    # Use repr() and strip the surrounding quotes so embedded escapes
    # remain visible in the text form.
    def tostr(self, x):
        return repr(x)[1:-1]
class FormatFormatStr(FormatObj):
    # Formatter driven by a %-style format string *fmt*.

    def __init__(self, fmt):
        self.fmt = fmt

    def tostr(self, x):
        # None has no meaningful formatted value
        if x is None:
            return 'None'
        return self.fmt % self.toval(x)
class FormatFloat(FormatFormatStr):
    # Float formatter with a fixed decimal *precision* and a display
    # *scale* applied on output and undone when parsing.

    def __init__(self, precision=4, scale=1.):
        FormatFormatStr.__init__(self, '%%1.%df' % precision)
        self.precision = precision
        self.scale = scale

    def __hash__(self):
        # distinguish formatters that differ in precision or scale
        return hash((self.__class__, self.precision, self.scale))

    def toval(self, x):
        return None if x is None else x * self.scale

    def fromstr(self, s):
        return float(s) / self.scale
class FormatInt(FormatObj):
    # Integer formatter; values are truncated toward zero via int().

    def tostr(self, x):
        return str(int(x))

    def toval(self, x):
        return int(x)

    def fromstr(self, s):
        return int(s)
class FormatBool(FormatObj):
    # Boolean formatter: toval() emits 'True'/'False' via str().

    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        # Fixed: the previous ``bool(s)`` was True for ANY non-empty
        # string, including 'False', so it could not invert toval().
        # Parse the canonical str(bool) round-trip form instead.
        return s == 'True'
class FormatPercent(FormatFloat):
    # Render fractional values as percentages (scale factor 100).
    def __init__(self, precision=4):
        super(FormatPercent, self).__init__(precision, scale=100.)
class FormatThousands(FormatFloat):
    # Render values in units of thousands (scale factor 1e-3).
    def __init__(self, precision=4):
        super(FormatThousands, self).__init__(precision, scale=1e-3)
class FormatMillions(FormatFloat):
    # Render values in units of millions (scale factor 1e-6).
    def __init__(self, precision=4):
        super(FormatMillions, self).__init__(precision, scale=1e-6)
class FormatDate(FormatObj):
    # Render date objects through strftime(*fmt*); parse with dateutil.

    def __init__(self, fmt):
        self.fmt = fmt

    def __hash__(self):
        # include the format string so distinct date formats hash apart
        return hash((self.__class__, self.fmt))

    def toval(self, x):
        return 'None' if x is None else x.strftime(self.fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
    # Like FormatDate but defaults to a full timestamp format and keeps
    # the time-of-day when parsing.

    def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
        FormatDate.__init__(self, fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x)
# Default formatter instance for each numpy scalar type that can appear
# as a record-array column; consulted by get_formatd() below.
defaultformatd = {
    np.bool_: FormatBool(),
    np.int16: FormatInt(),
    np.int32: FormatInt(),
    np.int64: FormatInt(),
    np.float32: FormatFloat(),
    np.float64: FormatFloat(),
    np.object_: FormatObj(),
    np.string_: FormatString(),
    }
def get_formatd(r, formatd=None):
    'build a formatd guaranteed to have a key for every dtype name'
    if formatd is None:
        formatd = dict()
    for name in r.dtype.names:
        # keep caller-supplied formatters; fill gaps from the defaults
        if formatd.get(name) is None:
            dt = r.dtype[name]
            formatd[name] = defaultformatd.get(dt.type, FormatObj())
    return formatd
def csvformat_factory(format):
    # Deep-copy so the caller's formatter is not mutated; CSV storage
    # wants raw (unscaled, full-precision) float values.
    newformat = copy.deepcopy(format)
    if isinstance(newformat, FormatFloat):
        newformat.scale = 1.  # override scaling for storage
        newformat.fmt = '%r'
    return newformat
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
    """
    Returns a textual representation of a record array.

    Parameters
    ----------
    r : numpy recarray
        The record array to render.
    header : list
        column headers
    padding :
        space between each column
    precision : int or list of int
        number of decimal places to use for floats.
        Set to an integer to apply to all floats.  Set to a
        list of integers to apply precision individually.
        Precision for non-floats is simply ignored.
    fields : list
        If not None, a list of field names to print.  fields
        can be a list of strings like ['field1', 'field2'] or a single
        comma separated string like 'field1,field2'

    Examples
    --------
    For ``precision=[0,2,3]``, the output is ::

      ID    Price   Return
      ABC   12.54    0.234
      XYZ    6.32   -0.076
    """
    if fields is not None:
        r = rec_keep_fields(r, fields)

    # A single numeric precision applies to every column.
    if cbook.is_numlike(precision):
        precision = [precision]*len(r.dtype)

    def get_type(item, atype=int):
        # Probe int -> float -> str until *item* parses cleanly; returns
        # the narrowest type that can represent the item's string form.
        tdict = {None: int, int: float, float: str}
        try:
            atype(str(item))
        except:
            return get_type(item, tdict[atype])
        return atype

    def get_justify(colname, column, precision):
        # Return (justification, column width, format string) for one
        # column: justification 0 = left (strings), 1 = right (numbers).
        ntype = column.dtype

        if np.issubdtype(ntype, np.character):
            # Fixed-width string column: width comes from the dtype itself.
            fixed_width = int(ntype.str[2:])
            length = max(len(colname), fixed_width)
            return 0, length+padding, "%s"  # left justify

        if np.issubdtype(ntype, np.integer):
            length = max(len(colname),
                         np.max(list(map(len, list(map(str, column))))))
            return 1, length+padding, "%d"  # right justify

        if np.issubdtype(ntype, np.floating):
            fmt = "%." + str(precision) + "f"
            length = max(
                len(colname),
                np.max(list(map(len, list(map(lambda x: fmt % x, column)))))
            )
            return 1, length+padding, fmt  # right justify

        # Anything else: stringify and left justify.
        return (0,
                max(len(colname),
                    np.max(list(map(len, list(map(str, column))))))+padding,
                "%s")

    if header is None:
        header = r.dtype.names

    # Per-column (justify, pad, precision-format) triples.
    justify_pad_prec = [get_justify(header[i], r.__getitem__(colname),
                                    precision[i])
                        for i, colname in enumerate(r.dtype.names)]

    # Add an extra spacer column when a left-justified column follows a
    # right-justified one (and drop padding in the opposite transition).
    justify_pad_prec_spacer = []
    for i in range(len(justify_pad_prec)):
        just, pad, prec = justify_pad_prec[i]
        if i == 0:
            justify_pad_prec_spacer.append((just, pad, prec, 0))
        else:
            pjust, ppad, pprec = justify_pad_prec[i-1]
            if pjust == 0 and just == 1:
                justify_pad_prec_spacer.append((just, pad-padding, prec, 0))
            elif pjust == 1 and just == 0:
                justify_pad_prec_spacer.append((just, pad, prec, padding))
            else:
                justify_pad_prec_spacer.append((just, pad, prec, 0))

    def format(item, just_pad_prec_spacer):
        # Render one cell according to its column's layout tuple.
        just, pad, prec, spacer = just_pad_prec_spacer
        if just == 0:
            return spacer*' ' + str(item).ljust(pad)
        else:
            if get_type(item) == float:
                item = (prec % float(item))
            elif get_type(item) == int:
                item = (prec % int(item))
            return item.rjust(pad)

    textl = []
    textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
                          for j, colitem in enumerate(header)]))
    for i, row in enumerate(r):
        textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
                              for j, colitem in enumerate(row)]))
        if i == 0:
            # Strip trailing padding from the header line once the first
            # data row has fixed the column widths.
            textl[0] = textl[0].rstrip()

    text = os.linesep.join(textl)
    return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
            missingd=None, withheader=True):
    """
    Save the data from numpy recarray *r* into a
    comma-/space-/tab-delimited file.  The record array dtype names
    will be used for column headers.

    *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    *withheader*: if withheader is False, do not write the attribute
      names in the first row

    for formatd type FormatFloat, we override the precision to store
    full precision floats in the CSV file

    See Also
    --------
    :func:`csv2rec`
        For information about *missing* and *missingd*, which can be used to
        fill in masked values into your CSV file.
    """
    delimiter = str(delimiter)

    if missingd is None:
        missingd = dict()

    def with_mask(func):
        # Wrap a formatter so masked values are replaced by their
        # per-column missing-value string.
        def newfunc(val, mask, mval):
            if mask:
                return mval
            else:
                return func(val)
        return newfunc

    if r.ndim != 1:
        raise ValueError('rec2csv only operates on 1 dimensional recarrays')

    # One masked-aware formatter per field, with CSV-safe float precision.
    formatd = get_formatd(r, formatd)
    funcs = []
    for i, name in enumerate(r.dtype.names):
        funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))

    fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
    writer = csv.writer(fh, delimiter=delimiter)
    header = r.dtype.names
    if withheader:
        writer.writerow(header)

    # Our list of specials for missing values
    mvals = []
    for name in header:
        mvals.append(missingd.get(name, missing))

    ismasked = False
    if len(r):
        row = r[0]
        # Masked recarrays expose a per-field mask via _fieldmask.
        ismasked = hasattr(row, '_fieldmask')

    for row in r:
        if ismasked:
            row, rowmask = row.item(), row._fieldmask.item()
        else:
            rowmask = [False] * len(row)
        writer.writerow([func(val, mask, mval) for func, val, mask, mval
                         in zip(funcs, row, rowmask, mvals)])
    # Only close handles this function itself opened.
    if opened:
        fh.close()
def griddata(x, y, z, xi, yi, interp='nn'):
    """Interpolates from a nonuniformly spaced grid to some other
    grid.

    Fits a surface of the form z = f(`x`, `y`) to the data in the
    (usually) nonuniformly spaced vectors (`x`, `y`, `z`), then
    interpolates this surface at the points specified by
    (`xi`, `yi`) to produce `zi`.

    Parameters
    ----------
    x, y, z : 1d array_like
        Coordinates of grid points to interpolate from.
    xi, yi : 1d or 2d array_like
        Coordinates of grid points to interpolate to.
    interp : string key from {'nn', 'linear'}
        Interpolation algorithm, either 'nn' for natural neighbor, or
        'linear' for linear interpolation.

    Returns
    -------
    2d float array
        Array of values interpolated at (`xi`, `yi`) points.  Array
        will be masked if any of (`xi`, `yi`) are outside the convex
        hull of (`x`, `y`).

    Notes
    -----
    If `interp` is 'nn' (the default), uses natural neighbor
    interpolation based on Delaunay triangulation.  This option is
    only available if the mpl_toolkits.natgrid module is installed.
    This can be downloaded from https://github.com/matplotlib/natgrid.
    The (`xi`, `yi`) grid must be regular and monotonically increasing
    in this case.

    If `interp` is 'linear', linear interpolation is used via
    matplotlib.tri.LinearTriInterpolator.

    Instead of using `griddata`, more flexible functionality and other
    interpolation options are available using a
    matplotlib.tri.Triangulation and a matplotlib.tri.TriInterpolator.
    """
    # Check input arguments.
    x = np.asanyarray(x, dtype=np.float64)
    y = np.asanyarray(y, dtype=np.float64)
    z = np.asanyarray(z, dtype=np.float64)
    if x.shape != y.shape or x.shape != z.shape or x.ndim != 1:
        raise ValueError("x, y and z must be equal-length 1-D arrays")

    xi = np.asanyarray(xi, dtype=np.float64)
    yi = np.asanyarray(yi, dtype=np.float64)
    if xi.ndim != yi.ndim:
        raise ValueError("xi and yi must be arrays with the same number of "
                         "dimensions (1 or 2)")
    if xi.ndim == 2 and xi.shape != yi.shape:
        raise ValueError("if xi and yi are 2D arrays, they must have the same "
                         "shape")
    if xi.ndim == 1:
        # Expand 1-D target coordinates into a full 2-D grid.
        xi, yi = np.meshgrid(xi, yi)

    if interp == 'nn':
        use_nn_interpolation = True
    elif interp == 'linear':
        use_nn_interpolation = False
    else:
        raise ValueError("interp keyword must be one of 'linear' (for linear "
                         "interpolation) or 'nn' (for natural neighbor "
                         "interpolation). Default is 'nn'.")

    # Remove masked points.
    mask = np.ma.getmask(z)
    if mask is not np.ma.nomask:
        x = x.compress(~mask)
        y = y.compress(~mask)
        z = z.compressed()

    if use_nn_interpolation:
        try:
            from mpl_toolkits.natgrid import _natgrid
        except ImportError:
            raise RuntimeError(
                "To use interp='nn' (Natural Neighbor interpolation) in "
                "griddata, natgrid must be installed. Either install it "
                "from http://github.com/matplotlib/natgrid or use "
                "interp='linear' instead.")

        if xi.ndim == 2:
            # natgrid expects 1D xi and yi arrays.
            xi = xi[0, :]
            yi = yi[:, 0]

        # Override default natgrid internal parameters.
        _natgrid.seti(b'ext', 0)
        _natgrid.setr(b'nul', np.nan)

        if np.min(np.diff(xi)) < 0 or np.min(np.diff(yi)) < 0:
            raise ValueError("Output grid defined by xi,yi must be monotone "
                             "increasing")

        # Allocate array for output (buffer will be overwritten by natgridd)
        zi = np.empty((yi.shape[0], xi.shape[0]), np.float64)

        # Natgrid requires each array to be contiguous rather than e.g. a view
        # that is a non-contiguous slice of another array.  Use numpy.require
        # to deal with this, which will copy if necessary.
        x = np.require(x, requirements=['C'])
        y = np.require(y, requirements=['C'])
        z = np.require(z, requirements=['C'])
        xi = np.require(xi, requirements=['C'])
        yi = np.require(yi, requirements=['C'])
        _natgrid.natgridd(x, y, z, xi, yi, zi)

        # Mask points on grid outside convex hull of input data.
        if np.any(np.isnan(zi)):
            zi = np.ma.masked_where(np.isnan(zi), zi)
        return zi
    else:
        # Linear interpolation performed using a matplotlib.tri.Triangulation
        # and a matplotlib.tri.LinearTriInterpolator.
        from .tri import Triangulation, LinearTriInterpolator
        triang = Triangulation(x, y)
        interpolator = LinearTriInterpolator(triang, z)
        return interpolator(xi, yi)
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation(x, y, xi, extrap=False):
    """
    Linear interpolation of *y* = f(*x*) at an arbitrary set of
    points *xi* (unlike :func:`cbook.simple_linear_interpolation`,
    which only fills in points between a start and an end).

    Out-of-range points yield NaN unless *extrap* is True, in which
    case the nearest endpoint value is used.

    This is very inefficient linear interpolation meant to be used
    only for a small number of points in relatively non-intensive use
    cases.  For real linear interpolation, use scipy.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.atleast_1d(xi)

    out_shape = list(y.shape)
    out_shape[0] = len(xi)
    yi = np.tile(np.nan, out_shape)

    for k, target in enumerate(xi):
        exact = np.nonzero(x == target)[0]
        if len(exact):
            # Exact grid hit: copy the sample straight through.
            yi[k] = y[exact[0]]
        elif target < x[0]:
            if extrap:
                yi[k] = y[0]
        elif target > x[-1]:
            if extrap:
                yi[k] = y[-1]
        else:
            # Interior point: interpolate within the bracketing segment.
            lo = max(np.nonzero(x < target)[0])
            yi[k] = y[lo] + (target-x[lo])/(x[lo+1]-x[lo]) * (y[lo+1]-y[lo])

    return yi
def slopes(x, y):
    """
    :func:`slopes` calculates the slope *y*'(*x*)

    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980): circles for
    interpolation demand a known aspect ratio between *x*- and
    *y*-values, whereas the parabola method does not, while giving
    very similar results in regular cases and behaving better in
    special ones.

    Norbert Nemec, Institute of Theoretical Physics, University or
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    x = np.asarray(x, float)
    y = np.asarray(y, float)

    dx = np.diff(x)
    dy = np.diff(y)
    secants = dy / dx

    yp = np.zeros(y.shape, float)
    # Interior points: secant slopes weighted by the opposite interval
    # width -- the derivative of the parabola through the three points.
    yp[1:-1] = (secants[:-1] * dx[1:] +
                secants[1:] * dx[:-1]) / (dx[1:] + dx[:-1])
    # End points: linear extrapolation from the adjacent interior slope.
    yp[0] = 2.0 * secants[0] - yp[1]
    yp[-1] = 2.0 * secants[-1] - yp[-2]
    return yp
def stineman_interp(xi, x, y, yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were:

      not an academic journal but once in a while something serious
      and original comes in

    adding that this was
      "apparently a real solution" to a well known problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University or Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.
    x = np.asarray(x, float)
    y = np.asarray(y, float)
    if x.shape != y.shape:
        raise ValueError("'x' and 'y' must be of same shape")

    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, float)

    xi = np.asarray(xi, float)
    yi = np.zeros(xi.shape, float)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  # note length of s is N-1 so last element is #N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or
    # xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx+1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    # using the yp slope of the left point
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)
    # using the yp slope of the right point
    dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1)

    dy1dy2 = dy1*dy2
    # The following is optimized for Python. The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python.
    # np.sign(dy1dy2)+1 picks one of three correction formulas:
    #   index 0 (sign -1): rational blend of the two slope corrections,
    #   index 1 (sign  0): zero correction (result is the linear value),
    #   index 2 (sign +1): harmonic-style blend 1/(dy1+dy2).
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
                                 ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
                                  0.0,
                                  1/(dy1+dy2),))
    return yi
class GaussianKDE(object):
    """
    Representation of a kernel-density estimate using Gaussian kernels.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).

    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth.  This can be
        'scott', 'silverman', a scalar constant or a callable.  If a
        scalar, this will be used directly as `kde.factor`.  If a
        callable, it should take a `GaussianKDE` instance as only
        parameter and return a scalar. If None (default), 'scott' is used.

    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.

    dim : int
        Number of dimensions.

    num_dp : int
        Number of datapoints.

    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`, with which
        the covariance matrix is multiplied.

    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).

    inv_cov : ndarray
        The inverse of `covariance`.

    Methods
    -------
    kde.evaluate(points) : ndarray
        Evaluate the estimated pdf on a provided set of points.

    kde(points) : ndarray
        Same as kde.evaluate(points)
    """

    # This implementation with minor modification was too good to pass up.
    # from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py

    def __init__(self, dataset, bw_method=None):
        self.dataset = np.atleast_2d(dataset)
        if not np.array(self.dataset).size > 1:
            raise ValueError("`dataset` input should have multiple elements.")

        self.dim, self.num_dp = np.array(self.dataset).shape
        # six.string_types keeps the string check working on both
        # Python 2 and Python 3.
        isString = isinstance(bw_method, six.string_types)

        if bw_method is None:
            pass
        elif (isString and bw_method == 'scott'):
            self.covariance_factor = self.scotts_factor
        elif (isString and bw_method == 'silverman'):
            self.covariance_factor = self.silverman_factor
        elif (np.isscalar(bw_method) and not isString):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)

        # Computes the covariance matrix for each Gaussian kernel using
        # covariance_factor().
        self.factor = self.covariance_factor()

        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self.data_covariance = np.atleast_2d(
                np.cov(
                    self.dataset,
                    rowvar=1,
                    bias=False))
            self.data_inv_cov = np.linalg.inv(self.data_covariance)

        self.covariance = self.data_covariance * self.factor ** 2
        self.inv_cov = self.data_inv_cov / self.factor ** 2
        # Normalization of the scaled Gaussian times the number of
        # kernels; evaluate() divides the kernel sum by this.
        self.norm_factor = np.sqrt(
            np.linalg.det(
                2 * np.pi * self.covariance)) * self.num_dp

    def scotts_factor(self):
        # Scott's rule of thumb: n ** (-1 / (d + 4)).
        return np.power(self.num_dp, -1. / (self.dim + 4))

    def silverman_factor(self):
        # Silverman's rule of thumb.
        return np.power(
            self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))

    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor

    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different
                     than the dimensionality of the KDE.
        """
        points = np.atleast_2d(points)

        dim, num_m = np.array(points).shape
        if dim != self.dim:
            msg = "points have dimension %s, dataset has dimension %s" % (
                dim, self.dim)
            raise ValueError(msg)

        result = np.zeros((num_m,), dtype=float)

        # Loop over whichever of (data, points) is smaller; the other is
        # handled in a vectorized fashion.
        if num_m >= self.num_dp:
            # there are more points than data, so loop over data
            for i in range(self.num_dp):
                diff = self.dataset[:, i, np.newaxis] - points
                tdiff = np.dot(self.inv_cov, diff)
                energy = np.sum(diff * tdiff, axis=0) / 2.0
                result = result + np.exp(-energy)
        else:
            # loop over points
            for i in range(num_m):
                diff = self.dataset - points[:, i, np.newaxis]
                tdiff = np.dot(self.inv_cov, diff)
                energy = np.sum(diff * tdiff, axis=0) / 2.0
                result[i] = np.sum(np.exp(-energy), axis=0)

        result = result / self.norm_factor

        return result

    __call__ = evaluate
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
    """
    *points* is a sequence of *x*, *y* points.
    *verts* is a sequence of *x*, *y* vertices of a polygon.

    Return value is a sequence of indices into points for the points
    that are inside the polygon.
    """
    # Build the polygon path once, then test each point against it.
    polygon = Path(verts)

    hits = []
    for index, point in enumerate(points):
        if polygon.contains_point(point):
            hits.append(index)
    return hits
def poly_below(xmin, xs, ys):
    """
    Given a sequence of *xs* and *ys*, return the vertices of a
    polygon that has a horizontal base at *xmin* and an upper bound at
    the *ys*.  *xmin* is a scalar.

    Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.,::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # Stay inside numpy.ma when either input is masked.
    masked = (isinstance(xs, np.ma.MaskedArray) or
              isinstance(ys, np.ma.MaskedArray))
    numpy = np.ma if masked else np

    xs = numpy.asarray(xs)
    ys = numpy.asarray(ys)
    n = len(xs)
    if n != len(ys):
        raise ValueError("'xs' and 'ys' must have the same length")

    # First half traces the curve, second half returns along the base.
    x = xmin * numpy.ones(2 * n)
    y = numpy.ones(2 * n)
    x[:n] = xs
    y[:n] = ys
    y[n:] = ys[::-1]
    return x, y
def poly_between(x, ylower, yupper):
    """
    Given a sequence of *x*, *ylower* and *yupper*, return the polygon
    that fills the regions between them.  *ylower* or *yupper* can be
    scalar or iterable.  If they are iterable, they must be equal in
    length to *x*.

    Return value is *x*, *y* arrays for use with
    :meth:`matplotlib.axes.Axes.fill`.
    """
    # Stay inside numpy.ma when any input is masked.
    masked = any(isinstance(var, np.ma.MaskedArray)
                 for var in [ylower, yupper, x])
    numpy = np.ma if masked else np

    Nx = len(x)
    # Broadcast scalar bounds to full-length vectors.
    if not cbook.iterable(ylower):
        ylower = ylower * numpy.ones(Nx)
    if not cbook.iterable(yupper):
        yupper = yupper * numpy.ones(Nx)

    # Trace the upper bound forward, then the lower bound backward.
    x = numpy.concatenate((x, x[::-1]))
    y = numpy.concatenate((yupper, ylower[::-1]))
    return x, y
def is_closed_polygon(X):
    """
    Tests whether first and last object in a sequence are the same.  These are
    presumably coordinates on a polygonal curve, in which case this function
    tests if that curve is closed.
    """
    first, last = X[0], X[-1]
    return np.all(first == last)
def contiguous_regions(mask):
    """
    Return a list of (ind0, ind1) pairs such that ``mask[ind0:ind1].all()``
    is True, covering every maximal run of True values in *mask*.
    """
    mask = np.asarray(mask, dtype=bool)

    if mask.size == 0:
        return []

    # Positions where the mask flips value; +1 converts "last index of the
    # old run" into "first index of the new run".
    boundaries = (np.nonzero(mask[:-1] != mask[1:])[0] + 1).tolist()

    # A run that starts at 0 or ends at len(mask) has no flip marking it.
    if mask[0]:
        boundaries.insert(0, 0)
    if mask[-1]:
        boundaries.append(len(mask))

    # Boundaries now alternate start, stop, start, stop, ...
    return list(zip(boundaries[::2], boundaries[1::2]))
def cross_from_below(x, threshold):
    """
    Return the indices into *x* where *x* crosses some threshold from
    below, e.g., the i's where::

      x[i-1]<threshold and x[i]>=threshold

    Example code::

        import matplotlib.pyplot as plt

        t = np.arange(0.0, 2.0, 0.1)
        s = np.sin(2*np.pi*t)

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t, s, '-o')
        ax.axhline(0.5)
        ax.axhline(-0.5)

        ind = cross_from_below(s, 0.5)
        ax.vlines(t[ind], -1, 1)

        ind = cross_from_above(s, -0.5)
        ax.vlines(t[ind], -1, 1)

        plt.show()

    See Also
    --------
    :func:`cross_from_above` and :func:`contiguous_regions`
    """
    x = np.asarray(x)
    # Pairs (x[i], x[i+1]) straddling the threshold upward.
    crossings = np.nonzero((x[:-1] < threshold) & (x[1:] >= threshold))[0]
    if len(crossings) == 0:
        return crossings
    # Shift so the reported index is the first sample at/above threshold.
    return crossings + 1
def cross_from_above(x, threshold):
    """
    Return the indices into *x* where *x* crosses some threshold from
    above, e.g., the i's where::

      x[i-1]>threshold and x[i]<=threshold

    See Also
    --------
    :func:`cross_from_below` and :func:`contiguous_regions`
    """
    # NOTE: the docstring previously said "from below", which
    # contradicted the condition shown and the implementation below.
    x = np.asarray(x)
    # Pairs (x[i], x[i+1]) straddling the threshold downward; +1 shifts
    # to the first sample at/below the threshold.
    ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0]
    if len(ind):
        return ind + 1
    else:
        return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths(X, P=2., axis=None):
    """
    Finds the length of a set of vectors in *n* dimensions.  This is
    like the :func:`numpy.norm` function for vectors, but has the ability to
    work over a particular axis of the supplied array or matrix.

    Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
    elements of *X* along the given axis.  If *axis* is *None*,
    compute over all elements of *X*.
    """
    X = np.asarray(X)
    powered = X ** P
    return np.sum(powered, axis=axis) ** (1. / P)
def distances_along_curve(X):
    """
    Computes the distance between a set of successive points in *N* dimensions.

    Where *X* is an *M* x *N* array or matrix.  The distances between
    successive rows is computed.  Distance is the standard Euclidean
    distance.
    """
    # Row-to-row displacement vectors, then their Euclidean norms.
    steps = np.diff(X, axis=0)
    return vector_lengths(steps, axis=1)
def path_length(X):
    """
    Computes the distance travelled along a polygonal curve in *N* dimensions.

    Where *X* is an *M* x *N* array or matrix.  Returns an array of
    length *M* consisting of the distance along the curve at each point
    (i.e., the rows of *X*).
    """
    # Cumulative sum of segment lengths, with distance 0 at the start.
    segment_lengths = distances_along_curve(X)
    return np.concatenate((np.zeros(1), np.cumsum(segment_lengths)))
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    Converts a quadratic Bezier curve to a cubic approximation.

    The inputs are the *x* and *y* coordinates of the three control
    points of a quadratic curve, and the output is a tuple of *x* and
    *y* coordinates of the four control points of the cubic curve.
    """
    # TODO: Candidate for deprecation -- no longer used internally
    # Interior cubic control points sit 2/3 of the way from each end
    # point toward the quadratic's middle control point.
    c1x = q0x + 2. / 3. * (q1x - q0x)
    c1y = q0y + 2. / 3. * (q1y - q0y)
    c2x = c1x + 1. / 3. * (q2x - q0x)
    c2y = c1y + 1. / 3. * (q2y - q0y)
    # The cubic's end points coincide with the quadratic's end points.
    return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
def offset_line(y, yerr):
    """
    Offsets an array *y* by +/- an error and returns a tuple
    (y - err, y + err).

    The error term can be:

    * A scalar. In this case, the returned tuple is obvious.
    * A vector of the same length as *y*. The quantities y +/- err are computed
      component-wise.
    * A tuple of length 2. In this case, yerr[0] is the error below *y* and
      yerr[1] is error above *y*. For example::

        from pylab import *
        x = linspace(0, 2*pi, num=100, endpoint=True)
        y = sin(x)
        y_minus, y_plus = mlab.offset_line(y, 0.1)
        plot(x, y)
        fill_between(x, y_minus, y2=y_plus)
        show()

    """
    # (The example above previously referenced undefined names ym/yp.)
    if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and
                                  len(yerr) == len(y)):
        # Scalar error, or per-element symmetric error vector.
        ymin = y - yerr
        ymax = y + yerr
    elif len(yerr) == 2:
        # Asymmetric (below, above) pair.
        ymin, ymax = y - yerr[0], y + yerr[1]
    else:
        raise ValueError("yerr must be scalar, 1xN or 2xN")
    return ymin, ymax
| gpl-3.0 |
ThomasMiconi/nupic.research | projects/sequence_learning/generate_plots.py | 6 | 2426 | import matplotlib.pyplot as plt
import multiprocessing
from optparse import OptionParser
import sequence_simulations
import sys
def fig6a(cliArgs, noises):
  """Build parsed option objects for figure 6A.

  Returns one options object per noise level for the default cell
  count, followed by one per noise level with ``--cells 1`` appended.
  """
  template = cliArgs + " --noise {}"
  parsed = []
  for extra in ("", " --cells 1"):
    for noise in noises:
      argv = (template + extra).format(noise).split(" ")
      parsed.append(sequence_simulations.parser.parse_args(argv)[0])
  return parsed
def fig6b(cliArgs, noises):
  """Build one parsed option object per noise level for figure 6B."""
  template = cliArgs + " --noise {}"
  parsed = []
  for noise in noises:
    argv = template.format(noise).split(" ")
    parsed.append(sequence_simulations.parser.parse_args(argv)[0])
  return parsed
# NOTE(review): this script is Python 2 only (print statement, xrange).
if __name__ == "__main__":
  parser = OptionParser("python %prog noise [noise ...]")
  parser.add_option("--figure",
                    help="Which figure to plot. Must be 'A' or 'B'.")
  parser.add_option("--passthru",
                    help=("Pass options through to sequence_simulations.py. "
                          "See `python sequence_simulations.py --help` for "
                          "options"))
  # Parse CLI arguments
  options, args = parser.parse_args(sys.argv[1:])
  if not args:
    print "You must specify at least one 'noise' argument."
    sys.exit(1)
  if options.figure == "A":
    figure = fig6a
  elif options.figure == "B":
    figure = fig6b
  else:
    print "You must specify one of '--figure A' or '--figure B'"
    sys.exit(1)
  # Convert list of str to list of float
  noises = [float(noise) for noise in args]
  # Run simulations in parallel, one experiment per parsed option set.
  pool = multiprocessing.Pool()
  results = pool.map(sequence_simulations.runExperiment1,
                     figure(options.passthru, noises))
  fig = plt.figure()
  ax = fig.add_subplot(111)
  ax.set_xlabel("Sequence Elements")
  ax.set_ylabel("Accuracy")
  # Plot results
  for result in results:
    ax.plot(result, linewidth=2.0)
  # Legend
  if options.figure == "A":
    ax.legend(["HTM Layer", "First Order Model"], loc="lower right")
  elif options.figure == "B":
    ax.legend(["{}% cell death".format(int(noise * 100)) for noise in noises],
              loc="lower right")
  # Horizontal bar at 50% (chance accuracy reference line)
  ax.plot([0.5 for x in xrange(len(results[0]))], "--")
  # Re-tick axes
  plt.yticks((0.1, 0.2, 0.3, 0.4, 0.5, 0.6),
             ("10%", "20%", "30%", "40%", "50%", "60%"))
  plt.xticks((2000, 4000, 6000, 8000))
  # Show plot
plt.show() | agpl-3.0 |
h2educ/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np
np.random.seed(0)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

# Synthetic binary classification problem with 2 informative and 2
# redundant features.
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)

###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))

# Diagonal reference: a perfectly calibrated classifier.
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        prob_pos = clf.decision_function(X_test)
        # Min-max scale decision values into [0, 1] so they are
        # comparable to predicted probabilities.
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)
    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))
    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | examples/neighbors/plot_species_kde.py | 39 | 4039 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']

# Training points: one (latitude, longitude) row per observation.
Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# Label 1 for Microryzomys minutus, 0 for Bradypus variegatus.
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians

# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
# -9999 marks ocean cells in the coverage raster; keep land cells only.
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.  # Grid coordinates to radians too (haversine metric)

# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)

for i in range(2):
    plt.subplot(1, 2, i + 1)

    # construct a kernel density estimate of the distribution
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])

    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)

    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)

    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        print(" - plot coastlines from coverage")
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
        plt.xticks([])
        plt.yticks([])

    plt.title(species_names[i])

plt.show()
| bsd-3-clause |
jszopi/repESP | repESP_old/graphs.py | 1 | 23402 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
import os
import numpy as np
from numpy.linalg import norm as vec_norm
import random
import math
import re
# This was necessary to prevent y-axis label from being cut off when plotting
# http://stackoverflow.com/a/17390833
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import field_comparison
DIR_LABELS = ['x', 'y', 'z']
def _plot_common(dimension, title, guideline=False):
    """Create a figure and axes of the requested dimensionality.

    Returns the ``(figure, axes)`` pair.  With ``guideline`` set, a dashed
    horizontal line at y=0 is drawn on 2D plots.
    """
    fig = plt.figure()
    if dimension not in (2, 3):
        raise NotImplementedError("Plotting of dimension {0} not implemented"
                                  .format(dimension))
    subplot_kwargs = {'projection': '3d'} if dimension == 3 else {}
    ax = fig.add_subplot(111, **subplot_kwargs)
    # A horizontal guideline only makes sense on a 2D plot.
    if guideline and dimension == 2:
        ax.axhline(color='k', linestyle='--')
    if title is not None:
        plt.title(title)
    return fig, ax
def plot(*fields, color=None, color_span=None, dist_field_filter=None,
         exclusion_dist=0, rand_skim=0.01, extra_filter=None, save_to=None,
         axes_limits=None, title=None, get_limits=None):
    """Scatter-plot two or three ``Field`` objects against each other.

    Parameters
    ----------
    fields : Field
        Two or three fields supplying the plot coordinates.
    color : Field, optional
        Field whose values colour the points.
    color_span : [float, float], optional
        Lower and upper limits of the colour scale.
    dist_field_filter : Field, optional
        A 'dist'-type field used to exclude points closer than
        ``exclusion_dist``.
    exclusion_dist : float, optional
        Exclusion distance used together with ``dist_field_filter``.
    rand_skim : float, optional
        Probability for any given point to be plotted.
    extra_filter : callable, optional
        Additional filter applied to the fields before plotting.
    save_to : str, optional
        File to save the plot to; display interactively if ``None``.
    axes_limits : list, optional
        Axes limits, interpreted as in ``_set_axes_limits``.
    title : str, optional
        Plot title.
    get_limits : list, optional
        If given, overwritten in-place with the resulting axes limits so
        they can be reused in other plots.
    """
    assert 2 <= len(fields) <= 3
    # Pack fields and color together
    if color is not None:
        fields_and_color = list(fields) + [color]
    else:
        fields_and_color = fields
    field_comparison._check_grids(*fields_and_color)
    field_comparison._check_fields_for_nans(*fields_and_color)
    # Necessary, as the original Field will be overwritten when filtering.
    # BUGFIX: previously this attribute was read unconditionally, raising
    # AttributeError whenever dist_field_filter was left at its default None.
    dist_field_filter_type = (dist_field_filter.field_type
                              if dist_field_filter is not None else None)
    fig, ax = _plot_common(len(fields), title, guideline=True)
    _set_axis_labels(ax, *fields)
    # This function got really fat due to all that filtering and it can still
    # handle only one additional filter. Some refactoring is due. TODO
    if extra_filter is not None:
        fields_and_color = extra_filter(*fields_and_color)
        # This filtering step changes all Fields to np.arrays. As a result, in
        # the next filtering step, by dist_field_filter, a mixture of np.arrays
        # and Fields is passed, which is not handled by the filters. While that
        # deficiency was intentional, I don't think there's a reason it should
        # not be handled (TODO). But for now, a kludge:
        if dist_field_filter is not None:
            dist_field_filter = dist_field_filter.values
    if dist_field_filter is not None:
        if dist_field_filter_type != 'dist':
            print("WARNING: The field selected for filtering is not of type "
                  "'dist' but ", dist_field_filter_type)
        dist_field_filter, *fields_and_color = field_comparison.filter_by_dist(
            exclusion_dist, *([dist_field_filter] + fields_and_color))
    elif exclusion_dist:
        print("WARNING: exclusion distance specified but no Field passed to "
              "filter by.")
    fields_and_color = field_comparison.skim(rand_skim, *fields_and_color)
    fields_and_color = list(map(field_comparison._flatten_no_nans,
                                fields_and_color))
    if color is not None:
        cmap = _get_cmap(len(fields), color.field_type)
        cmap_name = color.lookup_name()
        *fields, color = fields_and_color
        # ax.scatter has to be inside of the 'color is not None' conditional
        # because an error occurs when the kwarg ``c`` is explicitly set to
        # None, even though it's the default value.
        # BUGFIX: `a if cond else None, None` parsed as `(a ... else None),
        # None`, which assigned the whole color_span to vmin and None to vmax.
        vmin, vmax = color_span if color_span is not None else (None, None)
        image = ax.scatter(*fields, c=color, cmap=cmap, vmin=vmin, vmax=vmax,
                           lw=0, s=5)
        cbar = fig.colorbar(image, label=cmap_name)
    else:
        fields = fields_and_color
        ax.scatter(*fields, lw=0, s=5)
    _set_axes_limits(len(fields), ax, axes_limits)
    _save_or_display(save_to)
    # Save limits to get_limits. This is useful when they are to be reused in
    # other plots. Saving the limits to an argument was more intuitive than
    # returning them.
    if get_limits is not None:
        # Code copied from _set_axes_limits (TODO: DRY)
        limits = []
        for dir_label in DIR_LABELS[:len(fields)]:
            # Get current limits
            limits.append(getattr(ax, "get_" + dir_label + "lim")())
        get_limits[:] = limits
def _set_axes_limits(dimension, ax, axes_limits):
    """Apply requested limits to the axes, keeping current values for None.

    ``axes_limits`` may contain fewer pairs than there are axes; the pairs
    are applied to the leading axes in x, y, z order.
    """
    if axes_limits is None:
        return
    # This should be an Exception not assertion though.
    assert len(axes_limits) <= dimension
    for requested, dir_label in zip(axes_limits, DIR_LABELS):
        current = list(getattr(ax, "get_" + dir_label + "lim")())
        merged = [cur if req is None else req
                  for cur, req in zip(current, requested)]
        getattr(ax, "set_" + dir_label + "lim")(merged)
    # Although **not for my purposes at the moment** (I only want to set limits
    # so that different plots can be easily compared, so both axes will be
    # getting set), it would be nice to rescale the axes which were not
    # modified. However, when autoscaling, matplotlib always uses all the data.
    # ax.relim() with ax.autoscale_view() seemed to be relevant but they do not
    # easily operate on datapoints I think.
def _set_axis_labels(ax, *fields):
    """Label each axis with the free-form name of the Field plotted on it."""
    for dir_label, field in zip(DIR_LABELS, fields):
        getattr(ax, "set_" + dir_label + "label")(field.lookup_name())
def _get_cmap(dimension, field_type):
    """Pick a colour map appropriate to the plot dimension and field type."""
    if field_type == 'dist' and dimension != 3:
        # Shading by distance is more intuitive
        return plt.get_cmap('Blues_r')
    if field_type == 'dist':
        print("WARNING: Shading by distance doesn't look good on a 3D "
              "plot. Colouring instead.")
    return plt.get_cmap('coolwarm_r')
def _save_or_display(save_to=None):
    """Save the current plot, or show it interactively when save_to is None."""
    if save_to is None:
        plt.show()
        return
    if type(save_to) is PdfPages:
        # Need to check the type first, because it may be a file object if
        # a pdf is to be created, see:
        # http://matplotlib.org/faq/howto_faq.html#save-multiple-plots-to-one-pdf-file
        plt.savefig(save_to, format="pdf")
    elif os.path.isfile(save_to):
        raise FileExistsError("File exists: " + save_to)
    else:
        # DPI may need to be increased
        plt.savefig(save_to)
    plt.close()
def plot_points(points_field, dimension, title=None, color_span=None,
                axes_limits=None, save_to=None, rand_skim=1, plane_eqn=None,
                dist_thresh=None, molecule=None, atom_dist_threshs=None,
                atom_format=None, show_all_atoms=False):
    """Plot fitting or cube points in 2 or 3D coloured by values

    Parameters
    ----------
    points_field : Field
        The ``Field`` object containing the points to be plotted.
    dimension : {2, 3}
        Dimensions of the plot.
    title : str, optional
        Plot title.
    color_span : [float, float], optional
        The lower and upper limits for the color range for field values at
        fitting points. If this option is not specified, the limits will be
        calculated automatically based on all data points, not only the
        plotted slice of points.
    axes_limits : [float, float], optional
        A pair of values for the axes limits in angstroms. The same limits
        will be applied to all axes, non-square/cubic plots are currently not
        supported.
    save_to : str, optional
        The file to which the graph is to be saved. If not specified, the
        graph will be displayed in interactive mode.
    rand_skim : float, optional
        Probability for a given point to be plotted; use values in (0, 1]
        to thin out crowded plots. The default of 1 plots all points.
    plane_eqn : List[float], optional
        The equation for the slicing plane specified with a list of
        parameters of the plane equation Ax + By + Cz + D = 0.
    dist_thresh : float, optional
        The distance in angstrom from the slicing plane within which points
        are to be plotted. If all points are to be plotted, specify a very
        high number.
    molecule : Molecule, optional
        The molecule to be plotted.
    atom_dist_threshs : List[float], optional
        The thresholds for atom distance from the slicing plane used to
        choose the label formatting given in ``atom_format``. Defaults to
        [0, 0.5, 1].
    atom_format : List[dict], optional
        The formatting for the atom labels for each of the distance ranges
        specified with ``atom_dist_threshs``; see ``_plot_atoms`` for the
        defaults.
    show_all_atoms : bool, optional
        Whether atoms beyond the last threshold are to be plotted, provided
        ``atom_format`` supplies formatting for that open range.
    """
    project_onto_plane = _check_args(dimension, plane_eqn, dist_thresh)
    field_comparison._check_fields_for_nans(points_field)
    fig, ax = _plot_common(dimension, title)
    # Skimming, filtering and projecting
    points, values = _points_dist_filter(
        points_field.get_points(), points_field.get_values(), plane_eqn,
        dist_thresh)
    points, values = _points_rand_skim(points, values, rand_skim)
    points = _project_points(points, project_onto_plane, dimension, plane_eqn)
    _plot_atoms(molecule, ax, dimension, plane_eqn, project_onto_plane,
                atom_dist_threshs, atom_format, show_all_atoms)
    cmap_name = points_field.lookup_name()
    cmap = plt.get_cmap('RdYlBu')
    # BUGFIX: `a if cond else None, None` parsed as `(a ... else None), None`,
    # assigning the whole color_span to vmin; fall back to (None, None).
    vmin, vmax = color_span if color_span is not None else (None, None)
    image = ax.scatter(*list(zip(*points))[:dimension], c=values,
                       cmap=cmap, vmin=vmin, vmax=vmax, s=50, lw=0.5)
    cbar = fig.colorbar(image, label=cmap_name)
    _set_axis_labels2(ax, dimension, project_onto_plane, plane_eqn)
    _set_axes_limits(dimension, ax, axes_limits)
    if dimension == 2:
        # BUGFIX: use the axes at hand; in recent matplotlib `plt.axes()`
        # creates a brand-new (empty) axes rather than returning the current
        # one, so the aspect setting never applied to the scatter plot.
        ax.set_aspect('equal')
    _save_or_display(save_to)
def _check_args(dimension, plane_eqn, dist_thresh):
"""Checks arguments and decides whether to project points"""
if dimension == 3:
project_onto_plane = False
elif dimension == 2:
if plane_eqn is None:
project_onto_plane = False
else:
project_onto_plane = True
else:
raise ValueError("Parameter `dimension` needs to be either 2 or 3 but "
"{0} was given.".format(dimension))
if dist_thresh is not None and plane_eqn is None:
raise ValueError("`dist_thresh` was specified but no `plane_eqn` was "
"given.")
if dist_thresh is None and dimension == 2:
print("WARNING: A 2D plot will look cluttered without cut-off value "
"for the distance from the specified plane (`dist_thresh`).")
return project_onto_plane
def _set_axis_labels2(ax, dimension, project_onto_plane, plane_eqn):
    """Label axes either with the projection-plane equation or x/y/z names."""
    if project_onto_plane:
        ax.set_xlabel(r'Coordinates mapped onto plane ${0:.2f}x {1:+.2f}y '
                      '{2:+.2f}z {3:+.2f} = 0$'.format(*plane_eqn))
        return
    # Zip with dimension to stop early if it's less than 3 dimensions
    for _, dir_label in zip(range(dimension), DIR_LABELS):
        getattr(ax, "set_" + dir_label + "label")(dir_label)
def _plot_atoms(molecule, ax, dimension, plane_eqn, project_onto_plane,
                atom_dist_threshs, atom_format, show_all_atoms):
    """Draw the atoms of ``molecule`` onto ``ax`` as labels and markers.

    Label formatting depends on each atom's distance from the slicing
    plane; the thresholds and per-range formats are documented in
    ``plot_points``.
    """
    # When writing docstrings, have a look at plot_points, where some of these
    # options are already documented.
    if molecule is None:
        return
    # Default values for formatting: solid red box, dashed red box, plain
    # grey text, plain grey text (for the open range beyond all thresholds).
    if atom_format is None:
        atom_format = [
            {
                'color': 'red',
                'bbox': dict(
                    facecolor='none',
                    edgecolor='red'
                )
            }, {
                'color': 'red',
                'bbox': dict(
                    facecolor='none',
                    edgecolor='red',
                    linestyle='dashed'
                )
            }, {
                'color': 'grey',
            }, {
                'color': 'grey',
            }]
    if atom_dist_threshs is None:
        atom_dist_threshs = [0, 0.5, 1]
    # This is outside of the loop to take advantage of projecting all atoms at
    # once with _project_points
    coords = [atom.coords for atom in molecule]
    coords = _project_points(coords, project_onto_plane, dimension, plane_eqn)
    for atom, coord in zip(molecule, coords):
        # `atom_format` may supply one extra entry beyond the thresholds
        # (the open range); anything else is a caller error.
        assert 0 <= len(atom_format) - len(atom_dist_threshs) <= 1
        atom_string = '{0}{1}'.format(atom.identity, atom.label)
        # Avoid retyping _plot_atom arguments by creating a lambda
        plot_atom = lambda curr_format, marker_fill: _plot_atom(
            ax, coord, atom_string, dimension, curr_format,
            marker_fill=marker_fill)
        if plane_eqn is None:
            # No slicing plane: every atom gets the same simple formatting.
            plot_atom({'color': 'red'}, 'k')
        else:
            # This big for-else loop checks into which threshold range fits the
            # atom's distance
            for curr_thresh, curr_format in zip(atom_dist_threshs,
                                                atom_format):
                dist = _plane_point_dist(plane_eqn, atom.coords)
                if _check_dist(dist, curr_thresh):
                    plot_atom(curr_format, 'k')
                    break
            else:
                # If it doesn't fit into any threshold, check if such atoms
                # should be plotted and if their plotting arguments have been
                # supplied as the additional, hanging element of `atom_format`
                if (len(atom_format) == len(atom_dist_threshs) + 1 and
                        show_all_atoms):
                    plot_atom(atom_format[-1], 'grey')
def _plot_atom(ax, coords, atom_string, dimension, curr_format, marker='D',
marker_fill='b'):
"""Plot atom as text and optionally marker"""
ax.text(*coords[:dimension], atom_string, **curr_format)
if marker is not None:
ax.scatter(*coords[:dimension], marker='D', c=marker_fill)
def _plane_point_dist(equation, point):
"""Calculate the distance between a point and a plane given by equation
Parameters
----------
equation : List[float]
A list of coefficients of the equation describing the plane :math:`Ax +
By + Cz + D = 0`. The length should hence be 4. For example, the
plane :math:`z = 0` corresponds to the argument ``[0, 0, 1, 0]``.
point : List[float]
The coordinates of the point ``[x, y, z]``. A list of length 3.
Returns
-------
float
The calculated distance according to the equation:
.. math::
d = \\frac{A x + B y + C z + D}{\sqrt{A^2 + B^2 + C^2}}
Returning the signed value of this expression allows to distinguish
between points lying on the opposite sides of the plane.
"""
normal = np.array(equation[:3])
point = np.array(point)
return (np.dot(normal, point) + equation[3])/vec_norm(normal)
def _plane_through_points(point1, point2, point3):
point1 = np.array(point1)
point2 = np.array(point2)
point3 = np.array(point3)
u = point2 - point1
v = point3 - point1
cross = np.cross(u, v)
if not np.count_nonzero(cross):
raise ValueError("The supplied points appear to be colinear.")
a, b, c = cross[:3]
d = - (a*point1[0] + b*point1[1] + c*point1[2])
return a, b, c, d
def plane_through_atoms(molecule, label1, label2, label3):
    """Return the plane (a, b, c, d) passing through three labelled atoms."""
    # Atom labels are 1-based while molecule indexing is 0-based.
    selected = (molecule[label - 1].coords
                for label in (label1, label2, label3))
    return _plane_through_points(*selected)
def _project_point_onto_plane(equation, point):
    r"""Perpendicularly project a point onto the plane given by *equation*.

    Parameters
    ----------
    equation : List[float]
        Coefficients [A, B, C, D] of the plane equation
        :math:`Ax + By + Cz + D = 0`.
    point : List[float]
        The coordinates of the point ``[x, y, z]``.

    Returns
    -------
    np.ndarray[float]
        The projected coordinates, computed as
        :math:`\vec{OA'} = \vec{OA} - d\,\mathbf{n}/\|\mathbf{n}\|`,
        where :math:`\mathbf{n}` is the plane normal and *d* the signed
        point-plane distance.
    """
    plane_normal = np.array(equation[:3])
    target = np.array(point)
    signed_dist = _plane_point_dist(equation, target)
    return target - signed_dist * plane_normal / vec_norm(plane_normal)
def _get_alt_coords(plane_eqn):
"""Create new coordinate system with z-axis orthogonal to given plane"""
# Normal to the plane
normal = np.array(plane_eqn[:3])
# Normalize (set magnitude to 1)
normal = normal/np.linalg.norm(normal)
# Set suggested direction of i, here it is the old x-direction.
i = np.array([1, 0, 0])
# Check if the normal coincides with the x-direction. If so, the i vector
# needs to be initially pointed in a different direction, e.g. that of y.
if np.dot(i, normal) == 1:
i = np.array([0, 1, 0])
# Select direction as close to the suggested one but orthogonal to the
# normal vector. This is done by taking the *rejected* vector when
# projecting i onto normal (the subtrahend).
i_prime = i - np.dot(i, normal)*normal
# Normalize
i_prime = i_prime/np.linalg.norm(i_prime)
# Find vector orthogonal to both i and the normal vector by taking their
# cross product. The order there is significant and was chosen to obtain a
# right-handed coordinate system (i, j, normal), just like (x, y, z)
j_prime = np.cross(normal, i_prime)
# No need to normalize
return i_prime, j_prime, normal
def _new_coord_matrix(new_coord_system):
"""Calculate matrix of transformation from old to new coordinate system"""
# This is an implementation of the formula after 6 on page 10 of:
# http://ocw.mit.edu/courses/aeronautics-and-astronautics/16-07-dynamics-fall-2009/lecture-notes/MIT16_07F09_Lec03.pdf
# The code below just calculates the elements of the matrix
old = np.identity(3)
# The templates are used to create the elements of the desired 3x3 matrix.
# They are populated in a meshgrid fashion, but I couldn't get it to work
# due to the nesting, so I settled on a list comprehension kludge.
# To simplify the code, the 9 elements of the matrix are kept as a
# contiguous array of 9 vectors, hence the reshaping.
old_template = np.array([old]*3).reshape(9, 3)
new_template = np.array([[elem]*3 for elem in new_coord_system])
new_template = new_template.reshape(9, 3)
# The desired matrix is calculated as an element-wise dot product
matrix = np.array([np.dot(old_elem, new_elem) for old_elem, new_elem in
zip(old_template, new_template)])
return matrix.reshape(3, 3)
def _project_points(points, project_onto_plane, dimension, plane_eqn):
    """Project points onto the given plane (3D) or its new coordinate system"""
    if not project_onto_plane:
        return points
    if dimension == 3:
        # Simple perpendicular projection onto the 3D plane (expected to be
        # rarely used; `plot_points` switches projection off in 3D).
        return [_project_point_onto_plane(plane_eqn, point)
                for point in points]
    if dimension == 2:
        # More than a projection: 'looking' at the plane perpendicularly
        # requires a change of coordinate system, otherwise the points would
        # be projected onto the (x, y) plane when flattening for plotting.
        matrix = _new_coord_matrix(_get_alt_coords(plane_eqn))
        return [np.dot(matrix, point) for point in points]
    return points
def _check_dist(dist, thresh):
"""Check if a distance is below the given threshold value"""
# The second condition ensures that floats are rounded correctly. With some
# of the grids some points may lie on the threshold value but would not be
# caught by the first condition due to float precision.
# Absolute tolerance was selected as one decimal place fewer than what
# seems to be the precision of Gaussian .esp coordinates.
return abs(dist) <= thresh or math.isclose(abs(dist), thresh, abs_tol=1e-4)
def _points_dist_filter(points, values, plane_eqn, dist_thresh):
    """Keep only the points (and their values) close to the slicing plane."""
    if dist_thresh is None or plane_eqn is None:
        return points, values
    kept = [(point, value) for point, value in zip(points, values)
            if _check_dist(_plane_point_dist(plane_eqn, point), dist_thresh)]
    if not kept:
        return [], []
    kept_points, kept_values = zip(*kept)
    return list(kept_points), list(kept_values)
def _points_rand_skim(points, values, rand_skim):
if rand_skim == 1:
return points, values
_points, _values = [], []
for point, value in zip(points, values):
if random.random() <= rand_skim:
_points.append(point)
_values.append(value)
return _points, _values
def pretty_molecule_name(molecule_name):
    """Convert a raw molecule name into a TeX-formatted plot label."""
    charge_symbols = (("_plus", "$^\oplus$"), ("_minus", "$^\ominus$"))
    for suffix, symbol in charge_symbols:
        if molecule_name.endswith(suffix):
            molecule_name = molecule_name[:-len(suffix)] + symbol
            break
    # Typeset every run of digits as a subscript.
    return re.sub(r'(\d+)', r'$_{\1}$', molecule_name)
| gpl-3.0 |
LSSTDESC/SLTimer | python/desc/sltimer/worker.py | 2 | 23802 | # ======================================================================
# License info here?
# ======================================================================
from __future__ import absolute_import
import os
import subprocess
import sys
import urllib

import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pycs

from .reading import *
__all__ = ['SLTimer', 'spl']
class SLTimer(object):
'''
Worker class for ingesting strongly lensed image light curves, and
measuring the time delays between them.
'''
def __init__(self):
    '''
    Creates an SLTimer with no data loaded and default analysis settings.
    '''
    # Model components and results, populated by later method calls:
    self.agn = None
    self.microlensing = None
    self.time_delays = None
    self.datafile = None
    self.lcs = None
    # Spline knot spacings for microlensing and intrinsic variability:
    self.ml_knotstep = 350
    self.knotstep = 20
    # Prior hyperparameters (Hubble constant and Fermat potential):
    self.Hbar = 70.
    self.sigmaH = 7
    self.phibar = None
    self.sigmaPhi = None
    self.Q = 0
def download(self, url, format='rdb', and_read=False):
    '''
    Downloads the datafile from a url.

    Parameters
    ----------
    url : string
        Web address of datafile.
    format : string
        Data format, 'rdb' or 'tdc2'
    and_read : boolean
        Read in data after downloading file?

    Notes
    -----
    Don't forget to set `and_read=True` if you want to use the data!
    '''
    # The local filename is the final component of the URL.
    self.datafile = url.split('/')[-1]
    # Skip the download when the file is already present locally.
    if not os.path.isfile(self.datafile):
        urllib.urlretrieve(url, self.datafile)
        print 'Downloaded datafile:', url
    if and_read:
        self.read_in(format=format)
    return
def read_in(self, datafile='self', format=None):
    '''
    Reads in light curve data from a file.

    Parameters
    ----------
    datafile : string
        Path to the data file; the literal 'self' means "use the file
        recorded by a previous `download` call".
    format : string
        Data format, 'rdb' or 'tdc2'; anything else raises ValueError.
    '''
    if datafile == 'self':
        pass
    else:
        self.datafile = datafile
    if format == 'rdb':
        self.lcs = read_in_rdb_data(self.datafile)
    elif format == 'tdc2':
        self.lcs = read_in_tdc2_data(self.datafile)
        # TDC2 headers carry the prior quantities needed by `prior`:
        # time-delay distance Q, Fermat potential difference and its error.
        Q_FP_ERR = get_tdc2_header(self.datafile)
        self.Q = Q_FP_ERR['Q']
        self.phibar = Q_FP_ERR['FP']
        self.sigmaPhi = Q_FP_ERR['FPErr']
    else:
        raise ValueError('Unrecognized or null format '+str(format))
    # Number of lensed images equals the number of light curves read.
    self.Nim = len(self.lcs)
    return
def prior(self, t):
    '''
    Evaluates the time-delay prior density at delay(s) t.

    Parameters
    ----------
    t : float or np.ndarray
        Time delay(s) at which to evaluate the prior.

    Notes
    -----
    BUGFIX: `self` was missing from the signature, so any call of this
    method raised NameError on the first `self.Hbar` access.
    Combines Gaussian priors on H0 (Hbar, sigmaH) and on the Fermat
    potential difference (phibar, sigmaPhi), marginalized analytically.
    '''
    Hbar = self.Hbar
    sigmaH = self.sigmaH
    phibar = self.phibar
    sigmaPhi = self.sigmaPhi
    # Convert Q using the speed of light in km/s.
    Q = self.Q/(3.0*1E5)
    # print(Q*phibar/Hbar)
    f = 1./(2*sigmaH*sigmaPhi*np.pi*Q)
    s = -(Hbar)**2/(sigmaH**2)+(-phibar**2)/(sigmaPhi**2)
    # NOTE(review): the next line overwrites the input `t`, so the factors
    # below use the transformed quantity rather than the raw delay —
    # presumably intentional, but worth confirming against the derivation.
    t = ((Hbar/(sigmaH**2)+(phibar*t)/(Q*sigmaPhi**2))**2)/(1./(sigmaH**2)+(t**2)/((sigmaPhi**2)*(Q**2)))
    # Normalize in log space to avoid overflow in the exponential.
    normalize = np.max(t)+s
    m = np.exp(s+t-normalize)
    ft = (Hbar/sigmaH**2+(phibar*t)/(Q*(sigmaPhi**2)))/(1./sigmaH**2+t**2/((sigmaPhi**2)*(Q**2)))
    fif = np.sqrt(np.pi/(1./sigmaH**2+t**2/((sigmaPhi**2)*(Q**2))))
    return f*m*ft*fif
def optimize_spline_model(self):
    '''
    Optimizes a spline model for the intrinsic variability.

    Returns
    -------
    The optimized free-knot spline fitted to `self.lcs` — `spl` is the
    module-level optimizer (defined elsewhere in this package).
    '''
    return spl(self.lcs, knotstep=self.knotstep)
#========================================================== Plotting light curves
def display_light_curves(self, filename=None, jdrange=(None), title=None,
                         given_curve=None):
    '''
    Displays the lightcurves in a single panel plot.

    Parameters
    ----------
    filename : string, optional
        If given, also write the figure to this file.
    jdrange : tuple, optional
        Julian date range to display.
    title : string, optional
        Plot title.
    given_curve : optional
        Either an (lcs, agn) pair or just a set of light curves; when
        omitted, `self.lcs` is displayed without a model curve.
    '''
    if given_curve is not None:
        # Accept either (lcs, agn-model) or bare light curves.
        if len(given_curve) == 2:
            lcs, agn = given_curve
        else:
            lcs = given_curve
            agn = None
    else:
        lcs = self.lcs
        agn = None
    pycs.gen.mrg.colourise(lcs)
    # Replace the following with an optional input list of shifts
    # lcs[1].shifttime(-5.0)
    # lcs[2].shifttime(-20.0)
    # lcs[3].shifttime(-70.0)
    pycs.gen.lc.display(lcs, [agn], figsize=(20, 7),
                        jdrange=jdrange, title=title, nicefont=True)
    # lcs = pycs.gen.util
    # for l in lcs:
    #     l.resetshifts()
    # A second call is needed to write the figure to file as well.
    if filename is not None:
        pycs.gen.lc.display(lcs, [agn], figsize=(20, 7),
                            jdrange=jdrange, title=title, nicefont=True,
                            filename=filename)
    return
def select_bands(self, bands):
    '''
    select bands you want to keep

    Parameters
    ----------
    bands :
        The filter bands to retain in the light curves.

    Notes:
    ------
    .. warning:: this function will change the light curve in SLTimer
    '''
    # Delegates to the module-level `select_bands` helper (imported via
    # `from .reading import *`), which this method shadows.
    self.lcs = select_bands(self.lcs, bands)
def reset_lc(self):
    '''
    Clears all time shifts and microlensing models from the light curves.
    '''
    for curve in self.lcs:
        curve.resetshifts()
        curve.resetml()
def whiten(self):
    '''
    Whitens a set of multi-filter light curves to a single fictitious band.
    '''
    # Delegates to the module-level `whiten` helper (imported via
    # `from .reading import *`), which this method shadows.
    self.lcs = whiten(self.lcs)
    return
#===================================================== Microlensing
def add_polynomial_microlensing(self):
    '''
    Attaches a polynomial microlensing model to each light curve.
    '''
    # Doubles always have two images; quads have two more.
    n_curves = 4 if self.Nim == 4 else 2
    for curve in self.lcs[:n_curves]:
        pycs.gen.polyml.addtolc(curve, nparams=3,
                                autoseasonsgap=600.0)
    return
def add_spline_microlensing(self):
    '''
    Adds spline microlensing to each light curve.
    '''
    # Delegates to the module-level helper using this SLTimer's
    # microlensing knot spacing.
    spline_microlensing(self.lcs, self.ml_knotstep)
    return
#========================================= Primary workhorse method
def estimate_time_delays(self, method='pycs', microlensing='spline', agn='spline', error=None, quietly=False):
    '''
    Measures time delays between images, by modeling all the light
    curves.

    Parameters
    ----------
    method : string
        Modeling package to use (currently only 'pycs' is available).
    microlensing : string
        Choice of microlensing model: 'polynomial' or 'spline'.
    agn : string
        Choice of intrinsic AGN variability model (only 'spline' is
        implemented).
    error : string
        Error estimation option: [None, 'complete', 'intrinsic variance'].
    quietly : boolean
        Redirect output to /dev/null?

    Notes
    -----
    Provides both polynomial and spline time delays.
    '''
    if method == 'pycs':
        # print "You are using the pycs method."
        pass
    else:
        print "The only available method is 'pycs' - exiting."
        return
    # Decide where the wrapped operations should send their output.
    if quietly:
        as_requested = {'stdout': None, 'stderr': None}
    else:
        as_requested = {'stdout': sys.stdout, 'stderr': sys.stderr}
    # Tell the lightcurves that their model is going to include microlensing:
    if microlensing == 'polynomial':
        with SilentOperation(**as_requested):
            self.add_polynomial_microlensing()
    elif microlensing == 'spline':
        with SilentOperation(**as_requested):
            self.add_spline_microlensing()
    else:
        pass
    # Keep a record:
    self.microlensing = microlensing
    # Optimize the model for both microlensing and intrinsic variability:
    if agn == 'spline':
        with SilentOperation(**as_requested):
            self.agn = self.optimize_spline_model()
    else:
        print "Error: only free-knot spline models are available for AGN variability at present."
        return
    # Do error analysis, if required:
    if error == 'complete':
        with SilentOperation(**as_requested):
            self.estimate_uncertainties()
    elif error == 'intrinsic variance':
        with SilentOperation(**as_requested):
            self.find_intrinsic_variance()
    else:
        return
#===================================================== Evaluate the fitting
def compute_chisq(self, delay, batch=False, getlcs=False):
    """
    return chisquare of spline fitting given time delay

    Parameters
    ----------
    delay : 1D array
        array contains time delays for each light curve. The convention is
        [dt_AB, dt_AC, dt_AD]
    batch : bool
        if batch==True, then delay can be a two dimensional array with each
        row contains a set of time delay sample.
    getlcs : bool
        if True, also return the shifted light curves (single-delay mode
        only; ignored when batch=True).
    """
    if batch:
        # Evaluate chi-squared for each delay sample in turn.
        chisquare = []
        for item in delay:
            chisquare.append(get_chi_squared(
                lcs_original=self.lcs,
                ml_knotstep=self.ml_knotstep, delay=item,
                getlcs=False, knotstep=self.knotstep
                ))
        return chisquare
    return get_chi_squared(lcs_original=self.lcs,
                           ml_knotstep=self.ml_knotstep,
                           getlcs=getlcs,
                           delay=delay, knotstep=self.knotstep)
def generate_random_sample(self, rangeList, nsample):
    '''
    Draws uniform random time-delay samples.

    Parameters
    ----------
    rangeList : list of [min, max] pairs, or None
        Sampling range for each delay; None defaults every delay to
        [-100, 100] days.
    nsample : int
        Number of samples to draw.

    Returns
    -------
    np.ndarray
        Array of shape (nsample, Nim-1), one delay set per row.
    '''
    # One delay per image relative to image A.
    ndim = len(self.lcs)
    # Generate samples
    if rangeList is None:
        rangeList = [[-100, 100]]*(ndim-1)
    d = []
    for item in xrange(ndim-1):
        d.append(np.random.uniform(rangeList[item][0], rangeList[item][1],
                                   nsample))
    sample = np.array(d).T
    return sample
def write_out_to(self, result, outName):
    '''
    Writes sampled time delays and chi-squared values to a text file.

    Parameters
    ----------
    result : np.ndarray
        Samples with the delay columns first and the chi-squared value in
        the final column.
    outName : string
        Prefix for the output filename.
    '''
    file_name = "{0}_delay_chi2_{1}_samples.txt".format(outName,
                                                        result.shape[0])
    names = ["AB", "AC", "AD"]
    # BUGFIX: corrected typos in the emitted header ("Smaples ...
    # montecarlo" -> "Sampled ... Monte Carlo").
    header = "Sampled time delays for simple Monte Carlo and their corresponding \
chisquare. \n"
    # One column label per delay, then the chi-squared column.
    for index in xrange(result.shape[1]-1):
        header += " dt_"+names[index]
    header += " chisquare"
    np.savetxt(file_name, result, header=header, comments="# ")
    return
def plot_likelihood_from_file(self, file_name, chisquare=False, bins=20,
                              outName="from_file_", corner_plot=True,
                              add_prior=True):
    '''
    Plots the likelihood (or chi-squared) surface from a saved sample file.

    Parameters
    ----------
    file_name : string
        Text file of samples, as written by `write_out_to`.
    chisquare : boolean
        Plot raw chi-squared values instead of likelihood weights?
    bins : int
        Number of histogram bins.
    outName : string
        Prefix for the output plot's filename.
    corner_plot : boolean
        Make a corner plot rather than a 1D histogram?
    add_prior : boolean
        Forwarded to `plot_likelihood`.
    '''
    result = np.loadtxt(file_name)
    # The last 10 characters of the sample file name tag the output plot.
    self.plot_likelihood(result, outName+file_name[-10:],
                         chisquare=chisquare, bins=bins,
                         corner_plot=corner_plot, add_prior=add_prior)
    return
def plot_likelihood(self, result, outName, plot_contours=True,
                    plot_density=True, chisquare=False, bins=20,
                    corner_plot=True, add_prior=True):
    '''
    Plots the sampled likelihood (or chi-squared) surface.

    Parameters
    ----------
    result : np.ndarray
        Samples with delays in the leading columns, chi-squared last.
    outName : string
        Prefix for the saved figure's filename.
    plot_contours, plot_density : boolean
        Forwarded to `corner.corner`.
    chisquare : boolean
        Plot raw chi-squared values instead of likelihood weights?
    bins : int
        Number of histogram bins.
    corner_plot : boolean
        Corner plot (multi-dimensional) or a single 1D histogram?
    add_prior : boolean
        Accepted for interface compatibility; not used here.
    '''
    import corner
    log = True
    sample = result[:, :-1]
    if not chisquare:
        # Convert chi-squared values to normalized likelihood weights.
        weight = chi2_to_weight(result[:, -1])
        title = "likelihood"
    else:
        weight = result[:, -1]
        # weight = result[:, -1] - np.min(result[:, -1])
        log = False
        title = r"$\chi^2 plot$"
    if corner_plot:
        fig = corner.corner(sample, bins=bins,
                            labels=[r'$\Delta t_{AB}(days)$',
                                    r'$\Delta t_{AC}(days)$',
                                    r'$\Delta t_{AD}(days)$'],
                            weights=weight, plot_contours=plot_contours,
                            plot_density=plot_density,
                            max_n_ticks=10,
                            use_math_text=True
                            )
    else:
        if sample.shape[1] != 1:
            print("corner=False can only be true when there is only 1D sample")
        sample = sample.ravel()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        bins = np.linspace(sample.min(), sample.max(), bins)
        # Average the weights within each bin before plotting.
        wd, b = np.histogram(sample, bins=bins, weights=weight)
        counts, b = np.histogram(sample, bins=bins)
        bincentres = [(b[i]+b[i+1])/2. for i in range(len(b)-1)]
        ax.set_xlabel(r'$\Delta t_{AB}(days)$')
        ax.set_ylabel(r'$\chi^2$')
        ax.step(bincentres, wd/counts, where='mid', color='k',
                linestyle="-")
    fig.suptitle(title)
    fig.savefig("{0}_likelihood_{1}_samples.png".format(outName,
                                                        result.shape[0]))
    return
def compute_likelihood_simpleMC(self, nsample=1000, nprocess=5,
                                rangeList=None, outName="",
                                save_file=True, samples=None):
    '''
    Compute the likelihood by Monte Carlo method

    Parameters
    ----------
    nsample : int
        Number of random delay samples to draw (ignored when `samples`
        is supplied).
    nprocess : int
        Number of worker processes for the chi-squared evaluations.
    rangeList : list, optional
        Per-delay sampling ranges; see `generate_random_sample`.
    outName : string
        Prefix for output files and plots.
    save_file : boolean
        Write the samples to disk and plot the likelihood?
    samples : np.ndarray, optional
        Pre-drawn delay samples to evaluate instead of drawing new ones.

    Returns
    -------
    np.ndarray
        The delay sample with the smallest chi-squared value.
    '''
    from multiprocessing import Pool
    from functools import partial
    import time
    if samples is not None:
        sample = samples
        nsample = len(sample)
    else:
        sample = self.generate_random_sample(rangeList=rangeList,
                                             nsample=nsample)
    # calculate the chisquare in parallel over the sample rows
    start = time.time()
    p = Pool(processes=nprocess)
    chisquare = np.array(p.map(partial(
        get_chi_squared,
        lcs_original=self.lcs,
        ml_knotstep=self.ml_knotstep,
        getlcs=False,
        knotstep=self.knotstep),
        sample))
    end = time.time()
    print("Multiprocessing used {0} seconds.".format(end-start))
    # Weight each sample by its likelihood for the summary statistics.
    weight = chi2_to_weight(chisquare)
    print("min chisquare,", np.min(chisquare))
    print("#"*20)
    print("weighted time delays (dAB,dAC,dAD)(days) :",
          weight.T.dot(sample))
    results = np.column_stack((sample, chisquare))
    if save_file:
        self.write_out_to(results, outName)
        self.plot_likelihood(results, outName)
    return sample[np.argmin(chisquare)]
def degree_of_freedom(self):
spline = pycs.spl.topopt.opt_rough(self.lcs, nit=1,
knotstep=self.knotstep,
verbose=False)
num = len(spline.t)
spline = pycs.spl.topopt.opt_rough(self.lcs, nit=1,
knotstep=self.ml_knotstep,
verbose=False)
num_ml = len(spline.t)
free_param = num*2+4+len(self.lcs)*(num_ml*2+4)+4
nm_constraint = 0
for l in self.lcs:
nm_constraint += len(l)
print("knotstep for intrinsic fluctuation is: {0}".format(self.knotstep))
print("knotstep for micro lensing is: {0}".format(self.ml_knotstep))
print("number of data points is: {0}".format(nm_constraint))
dof = nm_constraint-free_param
return {"dof" : dof, "# data" : nm_constraint}
def initialize_time_delays(self, method=None, pars=None):
'''
Initializes the curve shifts by specifying 1 or 3 time delays.
'''
if method is None:
dt = {'AB':0.0}
if self.Nim == 4:
dt['AC'] = 0.0
dt['AD'] = 0.0
elif method == 'guess':
dt = pars
assert pars is not None
assert len(dt) == (self.Nim - 1)
assert type(dt) == dict
elif method == 'simpleMC':
bestGuess = self.compute_likelihood_simpleMC(nsample=10,
nprocess=4,
save_file=False)
dt = {'AB': bestGuess[0]}
if self.Nim == 4:
dt = {'AC': bestGuess[1]}
dt = {'AD': bestGuess[2]}
else:
raise ValueError("Unrecognized initialization method '"+method+"'")
# Set the shifts of each light curve object in lcs:
# All lenses:
self.lcs[1].shifttime(dt['AB'])
# Quads only:
if self.Nim == 4:
self.lcs[2].shifttime(dt['AC'])
self.lcs[3].shifttime(dt['AD'])
# Report that shifting has occurred, and report time delays:
print "Initialization completed, using method '"+method+"'"
self.report_time_delays()
return
#===================================================== Resimulating the Data
    def delete_old_files(self):
        '''
        Deletes the old files from previous error simulations.

        Removes the pycs "sims_copies"/"sims_mocks" directories and their
        optimizer output counterparts so a fresh uncertainty run starts
        from a clean slate.
        '''
        subprocess.call('rm -rfv sims_copies sims_mocks', shell=True)
        subprocess.call('rm -rfv sims_copies_opt_spl sims_copies_opt_disp sims_copies_opt_regdiff', shell=True)
        subprocess.call('rm -rfv sims_mocks_opt_spl sims_mocks_opt_disp sims_mocks_opt_regdiff', shell=True)
        print "The old files have been deleted."
        return
    def make_plain_copies(self, n=None, npkl=None):
        '''
        Makes copies of the data.

        Draws n copies per pickle file, npkl pickle files in total
        (n*npkl copies), into the pycs "copies" simset.
        '''
        Ncopies = n*npkl
        print "Making", Ncopies, "copies of the original dataset:"
        pycs.sim.draw.multidraw(self.lcs, onlycopy=True, n=n, npkl=npkl, simset="copies")
        return
    def make_mock_light_curves(self, n=None, npkl=None):
        '''
        Make mock lightcurves to help estimate uncertainties.

        Draws n*npkl synthetic datasets from the fitted spline model into
        the pycs "mocks" simset, with true time delays varied by +/-
        truetsr/2 days and per-image microlensing noise injected by the
        tweakml closures below.
        '''
        modellcs, modelspline = self.lcs, self.agn
        # Per-image power-law noise parameters (beta, sigma) for the mocks.
        # NOTE(review): these constants look tuned for a specific system —
        # confirm they are appropriate before reusing on other lenses.
        def Atweakml(xlcs):
            return pycs.sim.twk.tweakml(xlcs, beta=-1.5, sigma=0.25, fmin=1/500.0, fmax=None, psplot=False)
        def Btweakml(xlcs):
            return pycs.sim.twk.tweakml(xlcs, beta=-1.0, sigma=0.9, fmin=1/500.0, fmax=None, psplot=False)
        def Ctweakml(xlcs):
            return pycs.sim.twk.tweakml(xlcs, beta=-1.0, sigma=1.5, fmin=1/500.0, fmax=None, psplot=False)
        def Dtweakml(xlcs):
            return pycs.sim.twk.tweakml(xlcs, beta=-0.0, sigma=4.5, fmin=1/500.0, fmax=None, psplot=False)
        Nmocks = n*npkl
        # Spread of the randomized true time shifts, in days.
        truetsr = 8.0
        print "Making", Nmocks, "synthetic datasets, varying time delays by +/-", truetsr/2.0, "days"
        pycs.sim.draw.saveresiduals(modellcs, modelspline)
        pycs.sim.draw.multidraw(modellcs, modelspline, n=n, npkl=npkl, simset="mocks", truetsr=truetsr, tweakml=[Atweakml, Btweakml, Ctweakml, Dtweakml])
        return
#========================================Making Multiple Model Fits
    def make_spline_model_fits_of_plain_copies(self):
        '''Fit the free-knot spline model to every plain copy, randomizing
        the initial time shifts by up to +/- 10 days.'''
        # Pass the optimizer function to multirun:
        pycs.sim.run.multirun("copies", self.lcs, spl, optset="spl", tsrand=10.0, keepopt=True)
        return
    def make_spline_model_fits_of_mock_light_curves(self):
        '''Fit the free-knot spline model to every mock dataset, with a
        small (+/- 1 day) randomization of the initial time shifts.'''
        tsrand = 1.0
        # Pass the optimizer function to multirun:
        pycs.sim.run.multirun("mocks", self.lcs, spl, optset="spl", tsrand=tsrand, keepopt=True)
        return
    def plot_intrinsic_variance_histograms(self): #The histogram will give the instrinsic variance
        '''Collect the spline fits of the plain copies and plot the delay
        histograms (fig_intrinsicvariance.pdf); their spread measures the
        intrinsic variance of the measurement.'''
        dataresults = [pycs.sim.run.collect("sims_copies_opt_spl", "blue", "Free-knot spline technique")]
        pycs.sim.plot.hists(dataresults, r=5.0, nbins=100, showqs=False,
                            filename="fig_intrinsicvariance.pdf", dataout=True)
        return
#=================================================== Error Analysis
    def error_summary(self):
        '''Summarize the mock-based error analysis.

        Collects the spline fits of the mocks, compares measured delays to
        the known truth (fig_measvstrue.pdf), plots delay covariances
        (fig_covplot.pdf) and a final delay summary plot (fig_delays.pdf).
        '''
        simresults = [
            pycs.sim.run.collect("sims_mocks_opt_spl", "blue", "Free-knot spline technique")]
        # Nice to replace self.time_delays with a version including error bars here...
        # Maybe write out the "samples" for post-processing! Could also make a corner plot...
        # Compare measured time delays with truth:
        pycs.sim.plot.measvstrue(simresults, errorrange=3.5, r=5.0, nbins = 1, binclip=True, binclipr=20.0,
                                 plotpoints=False, filename="fig_measvstrue.pdf", dataout=True)
        # Plot covariances between delays:
        pycs.sim.plot.covplot(simresults, filename="fig_covplot.pdf")
        # Create a summary plot (of error bars and relationship bewtween measurements):
        # NOTE(review): this local `spl` shadows the module-level optimizer
        # function of the same name within this method.
        spl = (pycs.gen.util.readpickle("sims_copies_opt_spl_delays.pkl"),
               pycs.gen.util.readpickle("sims_mocks_opt_spl_errorbars.pkl"))
        # One last plot:
        pycs.sim.plot.newdelayplot([spl], rplot=6.0, displaytext=True,
                                   filename = "fig_delays.pdf", refshifts=[{"colour":"gray", "shifts":(0, -5, -20, -70)}])
        return
#=====================================================Complete Error Analysis
    def estimate_uncertainties(self, n=None, npkl=None):
        '''Run the full uncertainty pipeline: wipe old simulations, draw
        plain copies and mocks (n per pickle, npkl pickles each), fit the
        spline model to both sets, and produce the summary plots.'''
        self.delete_old_files()
        self.make_plain_copies(n=n, npkl=npkl)
        self.make_mock_light_curves(n=n, npkl=npkl)
        # Add in an option to use regdiff and disp here
        self.make_spline_model_fits_of_plain_copies()
        self.make_spline_model_fits_of_mock_light_curves()
        self.plot_intrinsic_variance_histograms()
        self.error_summary()
        return
    def find_intrinsic_variance(self,n=None, npkl=None):
        '''Shortcut for the copies-only part of the pipeline: draw plain
        copies, fit them, and plot the intrinsic-variance histograms.'''
        self.make_plain_copies(n=n, npkl=npkl)
        self.make_spline_model_fits_of_plain_copies()
        self.plot_intrinsic_variance_histograms()
        return
    def report_time_delays(self):
        '''Print the current time delays between the light curves and
        return them as a formatted string (also cached on
        self.time_delays).'''
        print "Time Delays:"
        self.time_delays = pycs.gen.lc.getnicetimedelays(self.lcs, separator="\n", sorted=True)
        print self.time_delays
        return self.time_delays
# ======================================================================
# End of the SLTimer class.
# ======================================================================
# Optimizer functions (could go in "optimize.py" instead?)
def spl(lcs, shifttime=True, verbose=True, knotstep=20):
    '''Free-knot spline optimizer used by the pycs multirun machinery.

    Runs two rough passes at progressively finer knot spacing followed by
    a fine pass. The rough fits shift the light curves in place (their
    return values are deliberately discarded); only the final fine spline
    is returned.
    '''
    spline = pycs.spl.topopt.opt_rough(lcs, nit=5, knotstep=5/2.*knotstep,
                                       shifttime=shifttime, verbose=verbose)
    spline = pycs.spl.topopt.opt_rough(lcs, nit=5, knotstep=3/2.*knotstep,
                                       shifttime=shifttime, verbose=verbose)
    spline = pycs.spl.topopt.opt_fine(lcs, nit=10, knotstep=knotstep,
                                      shifttime=shifttime, verbose=verbose)
    return spline
def spline_microlensing(lcs, ml_knotstep):
    """Attach a spline microlensing model to every light curve.

    If `ml_knotstep` is None, no microlensing model is added and a notice
    is printed instead.
    """
    if ml_knotstep is None:
        print("you didn't add any microlensing")
        return
    for curve in lcs:
        pycs.gen.splml.addtolc(curve, knotstep=ml_knotstep)
    return
# To compute the chisquare
def get_chi_squared(delay, lcs_original, ml_knotstep, getlcs, knotstep=20):
    """Fit the spline model at fixed trial time delays and return chi^2.

    Works on a deep copy of `lcs_original` so the caller's curves are
    untouched: all shifts and microlensing are reset, a microlensing
    spline is attached, the trial delays are imposed, and a spline is
    fitted with time shifting disabled.

    Returns [curves, spline] when `getlcs` is true, otherwise the fit's
    `lastr2nostab` chi^2 value.
    """
    import copy
    curves = copy.deepcopy(lcs_original)
    for curve in curves:
        curve.resetshifts()
        curve.resetml()
    spline_microlensing(curves, ml_knotstep)
    # Curve 0 (image A) is the reference; shift the others by the trial delays.
    for i in range(1, len(curves)):
        curves[i].timeshift = delay[i - 1]
    spline = spl(curves, verbose=False, shifttime=False, knotstep=knotstep)
    return [curves, spline] if getlcs else spline.lastr2nostab
def chi2_to_weight(chisquare):
    """Convert chi^2 values into normalized likelihood weights.

    The minimum chi^2 is subtracted before exponentiation for numerical
    stability; the returned weights sum to one.
    """
    shifted = chisquare - np.min(chisquare)
    weight = np.exp(-0.5 * shifted)
    return weight / np.sum(weight)
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 24 | 13091 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(
expected_values, col, actual_dict[col]))
def _make_test_csv():
  """Write a 100-row CSV with int/float/bool/string columns to a temp file.

  Returns the path of the file, which is created with delete=False so it
  survives for the reading tests.
  """
  f = tempfile.NamedTemporaryFile(
      dir=tf.test.get_temp_dir(), delete=False, mode="w")
  w = csv.writer(f)
  w.writerow(["int", "float", "bool", "string"])
  for _ in range(100):
    intvalue = np.random.randint(-10, 10)
    floatvalue = np.random.rand()
    boolvalue = int(np.random.rand() > 0.3)
    stringvalue = "S: %.4f" % np.random.rand()
    row = [intvalue, floatvalue, boolvalue, stringvalue]
    w.writerow(row)
  f.close()
  return f.name
def _make_test_csv_sparse():
  """Write a 100-row CSV where each cell is randomly left empty.

  Empty cells exercise the default-value / sparse-parsing paths; returns
  the path of the file (created with delete=False).
  """
  f = tempfile.NamedTemporaryFile(
      dir=tf.test.get_temp_dir(), delete=False, mode="w")
  w = csv.writer(f)
  w.writerow(["int", "float", "bool", "string"])
  for _ in range(100):
    # leave columns empty; these will be read as default value (e.g. 0 or NaN)
    intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
    floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
    boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
    stringvalue = (("S: %.4f" % np.random.rand())
                   if np.random.rand() > 0.5 else "")
    row = [intvalue, floatvalue, boolvalue, stringvalue]
    w.writerow(row)
  f.close()
  return f.name
def _make_test_tfrecord():
  """Write a 100-record TFRecord file and return its path.

  Record i holds `range(i % 3)` in feature "var_len_int" and
  [float(i), 2 * float(i)] in feature "fixed_len_float".
  """
  f = tempfile.NamedTemporaryFile(dir=tf.test.get_temp_dir(), delete=False)
  w = tf.python_io.TFRecordWriter(f.name)
  try:
    for i in range(100):
      ex = example_pb2.Example()
      ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
      ex.features.feature["fixed_len_float"].float_list.value.extend(
          [float(i), 2 * float(i)])
      w.write(ex.SerializeToString())
  finally:
    # BUG FIX: the writer was never closed, so buffered records could be
    # left unflushed when readers later opened the file.
    w.close()
  return f.name
class TensorFlowDataFrameTestCase(tf.test.TestCase):
  """Tests for `TensorFlowDataFrame`."""
  def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
                                       num_batches, batch_size):
    # Draw `num_batches` batches and compare each against the matching
    # (wrap-around) rows of `pandas_df`.
    self.assertItemsEqual(
        list(pandas_df.columns) + ["index"], tensorflow_df.columns())
    for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
      row_numbers = [
          total_row_num % pandas_df.shape[0]
          for total_row_num in range(batch_size * batch_num, batch_size * (
              batch_num + 1))
      ]
      expected_df = pandas_df.iloc[row_numbers]
      _assert_df_equals_dict(expected_df, batch)
  def testInitFromPandas(self):
    """Test construction from Pandas DataFrame."""
    if not HAS_PANDAS:
      return
    pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
    tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df,
                                                       batch_size=10,
                                                       shuffle=False)
    batch = tensorflow_df.run_one_batch()
    # With shuffle=False and batch_size == len(df), the first batch must
    # reproduce the DataFrame's index exactly.
    np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
                                  "Expected index {}; got {}".format(
                                      pandas_df.index.values, batch["index"]))
    _assert_df_equals_dict(pandas_df, batch)
  def testBatch(self):
    """Tests `batch` method.
    `DataFrame.batch()` should iterate through the rows of the
    `pandas.DataFrame`, and should "wrap around" when it reaches the last row.
    """
    if not HAS_PANDAS:
      return
    pandas_df = pd.DataFrame({"albatross": range(10),
                              "bluejay": 1,
                              "cockatoo": range(0, 20, 2),
                              "penguin": list("abcdefghij")})
    tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
    # Rebatch `df` into the following sizes successively.
    batch_sizes = [4, 7]
    num_batches = 3
    # Only the last rebatch size determines the final batch shape.
    final_batch_size = batch_sizes[-1]
    for batch_size in batch_sizes:
      tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
    self._assert_pandas_equals_tensorflow(pandas_df,
                                          tensorflow_df,
                                          num_batches=num_batches,
                                          batch_size=final_batch_size)
  def testFromNumpy(self):
    # Each row of the identity matrix should come back with a 1 at the
    # position given by its own index.
    x = np.eye(20)
    tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
    for batch in tensorflow_df.run(30):
      for ind, val in zip(batch["index"], batch["value"]):
        expected_val = np.zeros_like(val)
        expected_val[ind] = 1
        np.testing.assert_array_equal(expected_val, val)
  def testFromCSV(self):
    if not HAS_PANDAS:
      return
    num_batches = 100
    batch_size = 8
    enqueue_size = 7
    data_path = _make_test_csv()
    default_values = [0, 0.0, 0, ""]
    pandas_df = pd.read_csv(data_path)
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    self._assert_pandas_equals_tensorflow(pandas_df,
                                          tensorflow_df,
                                          num_batches=num_batches,
                                          batch_size=batch_size)
  def testFromCSVLimitEpoch(self):
    batch_size = 8
    num_epochs = 17
    expected_num_batches = (num_epochs * 100) // batch_size
    data_path = _make_test_csv()
    default_values = [0, 0.0, 0, ""]
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
    actual_num_batches = len(result_batches)
    self.assertEqual(expected_num_batches, actual_num_batches)
    # TODO(soergel): figure out how to dequeue the final small batch
    expected_rows = 1696 # num_epochs * 100
    actual_rows = sum([len(x["int"]) for x in result_batches])
    self.assertEqual(expected_rows, actual_rows)
  def testFromCSVWithFeatureSpec(self):
    if not HAS_PANDAS:
      return
    num_batches = 100
    batch_size = 8
    data_path = _make_test_csv_sparse()
    feature_spec = {
        "int": tf.FixedLenFeature(None, dtypes.int16, np.nan),
        "float": tf.VarLenFeature(dtypes.float16),
        "bool": tf.VarLenFeature(dtypes.bool),
        "string": tf.FixedLenFeature(None, dtypes.string, "")
    }
    pandas_df = pd.read_csv(data_path, dtype={"string": object})
    # Pandas insanely uses NaN for empty cells in a string column.
    # And, we can't use Pandas replace() to fix them because nan != nan
    s = pandas_df["string"]
    for i in range(0, len(s)):
      if isinstance(s[i], float) and math.isnan(s[i]):
        pandas_df.set_value(i, "string", "")
    tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        feature_spec=feature_spec)
    # These columns were sparse; re-densify them for comparison
    tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
    tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
    self._assert_pandas_equals_tensorflow(pandas_df,
                                          tensorflow_df,
                                          num_batches=num_batches,
                                          batch_size=batch_size)
  def testFromExamples(self):
    num_batches = 77
    enqueue_size = 11
    batch_size = 13
    data_path = _make_test_tfrecord()
    features = {
        "fixed_len_float": tf.FixedLenFeature(shape=[2],
                                              dtype=tf.float32,
                                              default_value=[0.0, 0.0]),
        "var_len_int": tf.VarLenFeature(dtype=tf.int64)
    }
    tensorflow_df = df.TensorFlowDataFrame.from_examples(
        data_path,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        features=features,
        shuffle=False)
    # `test.tfrecord` contains 100 records with two features: var_len_int and
    # fixed_len_float.  Entry n contains `range(n % 3)` and
    # `float(n)` for var_len_int and fixed_len_float,
    # respectively.
    num_records = 100
    def _expected_fixed_len_float(n):
      return np.array([float(n), 2 * float(n)])
    def _expected_var_len_int(n):
      return np.arange(n % 3)
    for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
      record_numbers = [
          n % num_records
          for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
      ]
      for i, j in enumerate(record_numbers):
        np.testing.assert_allclose(
            _expected_fixed_len_float(j), batch["fixed_len_float"][i])
      var_len_int = batch["var_len_int"]
      # Sparse feature: walk the (row, position) indices and check each
      # stored value against the expected ragged row.
      for i, ind in enumerate(var_len_int.indices):
        val = var_len_int.values[i]
        expected_row = _expected_var_len_int(record_numbers[ind[0]])
        expected_value = expected_row[ind[1]]
        np.testing.assert_array_equal(expected_value, val)
  def testSplitString(self):
    batch_size = 8
    num_epochs = 17
    expected_num_batches = (num_epochs * 100) // batch_size
    data_path = _make_test_csv()
    default_values = [0, 0.0, 0, ""]
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    a, b = tensorflow_df.split("string", 0.7) # no rebatching
    total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
    a_result_batches = list(a.run(num_epochs=num_epochs))
    b_result_batches = list(b.run(num_epochs=num_epochs))
    self.assertEqual(expected_num_batches, len(total_result_batches))
    self.assertEqual(expected_num_batches, len(a_result_batches))
    self.assertEqual(expected_num_batches, len(b_result_batches))
    total_rows = sum([len(x["int"]) for x in total_result_batches])
    a_total_rows = sum([len(x["int"]) for x in a_result_batches])
    b_total_rows = sum([len(x["int"]) for x in b_result_batches])
    print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
    # TODO(soergel): figure out how to dequeue the final small batch
    expected_total_rows = 1696 # (num_epochs * 100)
    self.assertEqual(expected_total_rows, total_rows)
    self.assertEqual(1087, a_total_rows) # stochastic but deterministic
    # self.assertEqual(int(total_rows * 0.7), a_total_rows)
    self.assertEqual(609, b_total_rows) # stochastic but deterministic
    # self.assertEqual(int(total_rows * 0.3), b_total_rows)
    # The strings used for hashing were all unique in the original data, but
    # we ran 17 epochs, so each one should appear 17 times.  Each copy should
    # be hashed into the same partition, so there should be no overlap of the
    # keys.
    a_strings = set([s for x in a_result_batches for s in x["string"]])
    b_strings = set([s for x in b_result_batches for s in x["string"]])
    self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
vybstat/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """Holds the clicked data points and the fitted decision surface.

    Implements the observable side of the observer pattern: every
    registered observer is notified through its ``update`` method
    whenever the model changes.
    """
    def __init__(self):
        # Observers registered via add_observer().
        self.observers = []
        # (X1, X2, Z) grid of the current decision surface, if any.
        self.surface = None
        # Clicked examples as (x, y, label) tuples.
        self.data = []
        self.cls = None
        self.surface_type = 0
    def changed(self, event):
        """Broadcast *event* to every registered observer."""
        for obs in self.observers:
            obs.update(event, self)
    def add_observer(self, observer):
        """Subscribe *observer* to future change notifications."""
        self.observers.append(observer)
    def set_surface(self, surface):
        """Store the decision-surface grid."""
        self.surface = surface
    def dump_svmlight_file(self, file):
        """Write the collected examples to *file* in svmlight format."""
        data = np.array(self.data)
        dump_svmlight_file(data[:, 0:2], data[:, 2], file)
class Controller(object):
    """Mediates between the Tk widgets and the Model: reads the widget
    state, fits an SVM on demand, and pushes the resulting decision
    surface into the model."""
    def __init__(self, model):
        self.model = model
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False
    def fit(self):
        """Fit an SVM (or a one-class SVM if all labels agree) on the
        clicked examples with the current widget parameters, then store
        the resulting decision surface and notify observers."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]
        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            # All examples share one label: fall back to a one-class SVM.
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        self.model.changed("surface")
    def decision_surface(self, cls):
        """Evaluate the classifier's decision function on a unit-spaced
        grid covering the plot area; returns (X1, X2, Z)."""
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z
    def clear_data(self):
        """Drop all collected examples and reset the fitted flag."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")
    def add_example(self, x, y, label):
        """Record one clicked example with the given label."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")
        # update decision surface if already fitted.
        self.refit()
    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Matplotlib-in-Tk view: renders the clicked examples and the
    fitted decision surface, and forwards mouse clicks to the
    controller."""
    def __init__(self, root, controller):
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        # Plotted surface artists, so they can be removed before redraw.
        self.contours = []
        self.c_labels = None
        self.plot_kernels()
    def plot_kernels(self):
        # Static legend of the three kernel formulas below the plot area.
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
    def onclick(self, event):
        # Left button adds a positive example, right button a negative one.
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)
    def update_example(self, model, idx):
        # Draw example idx: white circle for label 1, black for label -1.
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
    def update(self, event, model):
        # Observer callback: dispatch on the model's change event.
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)
        if event == "example_added":
            self.update_example(model, -1)
        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()
        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)
        self.canvas.draw()
    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []
    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)
    def plot_decision_surface(self, surface, type):
        # type 0: margin hyperplanes as dashed/solid contour lines;
        # type 1: filled decision-function surface plus the boundary.
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Builds the control widgets (kernel radio buttons, C/gamma/degree/
    coef0 entries, surface-type radio buttons, Fit and Clear buttons)
    and wires them to the controller."""
    def __init__(self, root, controller):
        fm = Tk.Frame(root)
        # Kernel selection.
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)
        # Numeric hyperparameter entries; values are stored on the
        # controller as Tk StringVars and parsed in Controller.fit().
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        c = Tk.Frame(valbox)
        Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        c.pack()
        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        g = Tk.Frame(valbox)
        Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        g.pack()
        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        d = Tk.Frame(valbox)
        Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        d.pack()
        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        r = Tk.Frame(valbox)
        Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        r.pack()
        valbox.pack(side=Tk.LEFT)
        # Surface rendering mode.
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)
        # Action buttons.
        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command-line option parser for the GUI script.

    Defines a single ``--output`` option giving a path where the clicked
    examples are dumped (svmlight format) after the GUI exits.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--output",
                      action="store", type="str", dest="output",
                      help="Path where to dump data.")
    return parser
def main(argv):
    """Wire up model/view/controller, run the Tk main loop, and — after
    the window is closed — dump the collected examples to the path given
    by --output, if any."""
    op = get_parser()
    opts, args = op.parse_args(argv[1:])
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    # The view registers itself as a widget tree on root; keeping the
    # reference also keeps it alive as a model observer.
    view = View(root, controller)
    model.add_observer(view)
    Tk.mainloop()
    if opts.output:
        model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
nebw/keras | examples/variational_autoencoder_deconv.py | 5 | 4206 | '''This script demonstrates how to build a variational autoencoder with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, Flatten, Reshape
from keras.layers import Convolution2D, Deconvolution2D, MaxPooling2D
from keras.models import Model
from keras import backend as K
from keras import objectives
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
nb_filters = 32
# convolution kernel size
nb_conv = 3
# Fixed batch size: Deconvolution2D below needs a fully static output
# shape that includes the batch dimension.
batch_size = 16
original_dim = (img_chns, img_rows, img_cols)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 0.01
nb_epoch = 5
# Encoder: conv feature extractor -> dense -> (z_mean, z_log_var).
x = Input(batch_shape=(batch_size,) + original_dim)
c = Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', activation='relu')(x)
f = Flatten()(c)
h = Dense(intermediate_dim, activation='relu')(f)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
def sampling(args):
    """Reparameterization trick: z = z_mean + exp(z_log_var) * epsilon.

    NOTE(review): the scale factor is exp(z_log_var) rather than the more
    common exp(z_log_var / 2) for a log-variance parameterization — with
    epsilon_std = 0.01 this matches the original example, but confirm the
    intended interpretation of z_log_var.
    """
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
# Decoder: dense -> reshape to a conv feature map -> deconvolution back
# to the (channels, rows, cols) image shape.
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_f = Dense(nb_filters*img_rows*img_cols, activation='relu')
decoder_c = Reshape((nb_filters, img_rows, img_cols))
decoder_mean = Deconvolution2D(img_chns, nb_conv, nb_conv,
                               (batch_size, img_chns, img_rows, img_cols),
                               border_mode='same')
h_decoded = decoder_h(z)
f_decoded = decoder_f(h_decoded)
c_decoded = decoder_c(f_decoded)
x_decoded_mean = decoder_mean(c_decoded)
def vae_loss(x, x_decoded_mean):
    """VAE loss: reconstruction cross-entropy + KL divergence to N(0, I)."""
    # NOTE: binary_crossentropy expects a batch_size by dim for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    # KL(q(z|x) || N(0, I)) term, averaged over the latent dimensions.
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)
vae.summary()
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Insert the channels axis and scale pixel values to [0, 1].
x_train = x_train.astype('float32')[:, None, :, :] / 255.
x_test = x_test.astype('float32')[:, None, :, :] / 255.
vae.fit(x_train, x_train,
        shuffle=True,
        nb_epoch=nb_epoch,
        batch_size=batch_size,
        validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_f_decoded = decoder_f(_h_decoded)
_c_decoded = decoder_c(_f_decoded)
_x_decoded_mean = decoder_mean(_c_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# we will sample n points within [-15, 15] standard deviations
grid_x = np.linspace(-15, 15, n)
grid_y = np.linspace(-15, 15, n)
# Decode each latent grid point and paste it into the mosaic.
for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        x_decoded = generator.predict(z_sample)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.show()
| mit |
dsm054/pandas | pandas/tests/test_take.py | 1 | 16730 | # -*- coding: utf-8 -*-
import re
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import long
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
@pytest.fixture(params=[True, False])
def writeable(request):
    # Run every consuming test on both writeable arrays (fast typed
    # memoryview path) and read-only arrays.
    return request.param
# Check that take_nd works both with writeable arrays
# (in which case fast typed memory-views implementation)
# and read-only arrays alike.
@pytest.fixture(params=[
    (np.float64, True),
    (np.float32, True),
    (np.uint64, False),
    (np.uint32, False),
    (np.uint16, False),
    (np.uint8, False),
    (np.int64, False),
    (np.int32, False),
    (np.int16, False),
    (np.int8, False),
    (np.object_, True),
    # ``np.bool`` (a deprecated alias of the builtin ``bool``) was removed
    # in NumPy 1.24; ``np.bool_`` produces the identical boolean dtype
    # (np.dtype(np.bool_) == np.dtype(bool)), so test behavior is unchanged.
    (np.bool_, False),
])
def dtype_can_hold_na(request):
    """(dtype, can_hold_na) pairs: whether a take with missing values can be
    represented directly in that dtype (floats/objects) or must raise."""
    return request.param
@pytest.fixture(params=[
    # (input dtype, fill_value, expected output dtype): the dtype take
    # should promote to so that fill_value stays representable.
    (np.int8, np.int16(127), np.int8),
    (np.int8, np.int16(128), np.int16),
    (np.int32, 1, np.int32),
    (np.int32, 2.0, np.float64),
    (np.int32, 3.0 + 4.0j, np.complex128),
    (np.int32, True, np.object_),
    (np.int32, "", np.object_),

    (np.float64, 1, np.float64),
    (np.float64, 2.0, np.float64),
    (np.float64, 3.0 + 4.0j, np.complex128),
    (np.float64, True, np.object_),
    (np.float64, "", np.object_),

    (np.complex128, 1, np.complex128),
    (np.complex128, 2.0, np.complex128),
    (np.complex128, 3.0 + 4.0j, np.complex128),
    (np.complex128, True, np.object_),
    (np.complex128, "", np.object_),

    (np.bool_, 1, np.object_),
    (np.bool_, 2.0, np.object_),
    (np.bool_, 3.0 + 4.0j, np.object_),
    (np.bool_, True, np.bool_),
    (np.bool_, '', np.object_),
])
def dtype_fill_out_dtype(request):
    # Parametrized promotion table used by the *_fill_nonna tests below.
    return request.param
class TestTake(object):
    """Tests for the internal take_1d/take_nd fancy-indexing helpers:
    out= buffers, NA filling, dtype promotion and axis handling."""

    # Standard incompatible fill error.
    fill_error = re.compile("Incompatible type for fill_value")

    def test_1d_with_out(self, dtype_can_hold_na, writeable):
        """take_1d into a preallocated buffer; -1 requests NA fill, which
        must raise for dtypes that cannot hold NA."""
        dtype, can_hold_na = dtype_can_hold_na

        data = np.random.randint(0, 2, 4).astype(dtype)
        data.flags.writeable = writeable

        indexer = [2, 1, 0, 1]
        out = np.empty(4, dtype=dtype)
        algos.take_1d(data, indexer, out=out)
        expected = data.take(indexer)
        tm.assert_almost_equal(out, expected)

        # -1 in the indexer means "missing" -> fill with NaN where possible.
        indexer = [2, 1, 0, -1]
        out = np.empty(4, dtype=dtype)

        if can_hold_na:
            algos.take_1d(data, indexer, out=out)
            expected = data.take(indexer)
            expected[3] = np.nan
            tm.assert_almost_equal(out, expected)
        else:
            with pytest.raises(TypeError, match=self.fill_error):
                algos.take_1d(data, indexer, out=out)
            # No Exception otherwise.
            data.take(indexer, out=out)

    def test_1d_fill_nonna(self, dtype_fill_out_dtype):
        """take_1d with an explicit fill_value promotes the result dtype
        per the fixture table; no promotion when nothing is missing."""
        dtype, fill_value, out_dtype = dtype_fill_out_dtype
        data = np.random.randint(0, 2, 4).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_1d(data, indexer, fill_value=fill_value)
        assert ((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
        assert (result[3] == fill_value)
        assert (result.dtype == out_dtype)

        # No -1 present -> original dtype is kept.
        indexer = [2, 1, 0, 1]
        result = algos.take_1d(data, indexer, fill_value=fill_value)
        assert ((result[[0, 1, 2, 3]] == data[indexer]).all())
        assert (result.dtype == dtype)

    def test_2d_with_out(self, dtype_can_hold_na, writeable):
        """Same as test_1d_with_out but over both axes of a 2-D array."""
        dtype, can_hold_na = dtype_can_hold_na

        data = np.random.randint(0, 2, (5, 3)).astype(dtype)
        data.flags.writeable = writeable

        indexer = [2, 1, 0, 1]
        out0 = np.empty((4, 3), dtype=dtype)
        out1 = np.empty((5, 4), dtype=dtype)
        algos.take_nd(data, indexer, out=out0, axis=0)
        algos.take_nd(data, indexer, out=out1, axis=1)
        expected0 = data.take(indexer, axis=0)
        expected1 = data.take(indexer, axis=1)
        tm.assert_almost_equal(out0, expected0)
        tm.assert_almost_equal(out1, expected1)

        indexer = [2, 1, 0, -1]
        out0 = np.empty((4, 3), dtype=dtype)
        out1 = np.empty((5, 4), dtype=dtype)

        if can_hold_na:
            algos.take_nd(data, indexer, out=out0, axis=0)
            algos.take_nd(data, indexer, out=out1, axis=1)
            expected0 = data.take(indexer, axis=0)
            expected1 = data.take(indexer, axis=1)
            expected0[3, :] = np.nan
            expected1[:, 3] = np.nan
            tm.assert_almost_equal(out0, expected0)
            tm.assert_almost_equal(out1, expected1)
        else:
            for i, out in enumerate([out0, out1]):
                with pytest.raises(TypeError, match=self.fill_error):
                    algos.take_nd(data, indexer, out=out, axis=i)
                # No Exception otherwise.
                data.take(indexer, out=out, axis=i)

    def test_2d_fill_nonna(self, dtype_fill_out_dtype):
        """dtype promotion with a fill_value on both axes of a 2-D array."""
        dtype, fill_value, out_dtype = dtype_fill_out_dtype
        data = np.random.randint(0, 2, (5, 3)).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_nd(data, indexer, axis=0,
                               fill_value=fill_value)
        assert ((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
        assert ((result[3, :] == fill_value).all())
        assert (result.dtype == out_dtype)

        result = algos.take_nd(data, indexer, axis=1,
                               fill_value=fill_value)
        assert ((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
        assert ((result[:, 3] == fill_value).all())
        assert (result.dtype == out_dtype)

        # No missing entries -> dtype unchanged.
        indexer = [2, 1, 0, 1]
        result = algos.take_nd(data, indexer, axis=0,
                               fill_value=fill_value)
        assert ((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
        assert (result.dtype == dtype)

        result = algos.take_nd(data, indexer, axis=1,
                               fill_value=fill_value)
        assert ((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
        assert (result.dtype == dtype)

    def test_3d_with_out(self, dtype_can_hold_na):
        """out=-buffer takes along all three axes of a 3-D array."""
        dtype, can_hold_na = dtype_can_hold_na

        data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
        indexer = [2, 1, 0, 1]

        out0 = np.empty((4, 4, 3), dtype=dtype)
        out1 = np.empty((5, 4, 3), dtype=dtype)
        out2 = np.empty((5, 4, 4), dtype=dtype)

        algos.take_nd(data, indexer, out=out0, axis=0)
        algos.take_nd(data, indexer, out=out1, axis=1)
        algos.take_nd(data, indexer, out=out2, axis=2)

        expected0 = data.take(indexer, axis=0)
        expected1 = data.take(indexer, axis=1)
        expected2 = data.take(indexer, axis=2)

        tm.assert_almost_equal(out0, expected0)
        tm.assert_almost_equal(out1, expected1)
        tm.assert_almost_equal(out2, expected2)

        indexer = [2, 1, 0, -1]
        out0 = np.empty((4, 4, 3), dtype=dtype)
        out1 = np.empty((5, 4, 3), dtype=dtype)
        out2 = np.empty((5, 4, 4), dtype=dtype)

        if can_hold_na:
            algos.take_nd(data, indexer, out=out0, axis=0)
            algos.take_nd(data, indexer, out=out1, axis=1)
            algos.take_nd(data, indexer, out=out2, axis=2)

            expected0 = data.take(indexer, axis=0)
            expected1 = data.take(indexer, axis=1)
            expected2 = data.take(indexer, axis=2)

            expected0[3, :, :] = np.nan
            expected1[:, 3, :] = np.nan
            expected2[:, :, 3] = np.nan

            tm.assert_almost_equal(out0, expected0)
            tm.assert_almost_equal(out1, expected1)
            tm.assert_almost_equal(out2, expected2)
        else:
            for i, out in enumerate([out0, out1, out2]):
                with pytest.raises(TypeError, match=self.fill_error):
                    algos.take_nd(data, indexer, out=out, axis=i)
                # No Exception otherwise.
                data.take(indexer, out=out, axis=i)

    def test_3d_fill_nonna(self, dtype_fill_out_dtype):
        """dtype promotion with a fill_value along every axis of 3-D data."""
        dtype, fill_value, out_dtype = dtype_fill_out_dtype

        data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
        indexer = [2, 1, 0, -1]

        result = algos.take_nd(data, indexer, axis=0,
                               fill_value=fill_value)
        assert ((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
        assert ((result[3, :, :] == fill_value).all())
        assert (result.dtype == out_dtype)

        result = algos.take_nd(data, indexer, axis=1,
                               fill_value=fill_value)
        assert ((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
        assert ((result[:, 3, :] == fill_value).all())
        assert (result.dtype == out_dtype)

        result = algos.take_nd(data, indexer, axis=2,
                               fill_value=fill_value)
        assert ((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
        assert ((result[:, :, 3] == fill_value).all())
        assert (result.dtype == out_dtype)

        # No missing entries -> dtype unchanged.
        indexer = [2, 1, 0, 1]
        result = algos.take_nd(data, indexer, axis=0,
                               fill_value=fill_value)
        assert ((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
        assert (result.dtype == dtype)

        result = algos.take_nd(data, indexer, axis=1,
                               fill_value=fill_value)
        assert ((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
        assert (result.dtype == dtype)

        result = algos.take_nd(data, indexer, axis=2,
                               fill_value=fill_value)
        assert ((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
        assert (result.dtype == dtype)

    def test_1d_other_dtypes(self):
        """float32 input: missing positions become NaN in the result."""
        arr = np.random.randn(10).astype(np.float32)

        indexer = [1, 2, 3, -1]
        result = algos.take_1d(arr, indexer)
        expected = arr.take(indexer)
        expected[-1] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_2d_other_dtypes(self):
        """float32 2-D input with NaN fill along both axes."""
        arr = np.random.randn(10, 5).astype(np.float32)

        indexer = [1, 2, 3, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)
        expected = arr.take(indexer, axis=0)
        expected[-1] = np.nan
        tm.assert_almost_equal(result, expected)

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        expected = arr.take(indexer, axis=1)
        expected[:, -1] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_1d_bool(self):
        """bool input stays bool without NAs, upcasts to object with -1."""
        arr = np.array([0, 1, 0], dtype=bool)

        result = algos.take_1d(arr, [0, 2, 2, 1])
        expected = arr.take([0, 2, 2, 1])
        tm.assert_numpy_array_equal(result, expected)

        result = algos.take_1d(arr, [0, 2, -1])
        assert result.dtype == np.object_

    def test_2d_bool(self):
        """2-D bool take on both axes; object upcast when NA requested."""
        arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)

        result = algos.take_nd(arr, [0, 2, 2, 1])
        expected = arr.take([0, 2, 2, 1], axis=0)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
        expected = arr.take([0, 2, 2, 1], axis=1)
        tm.assert_numpy_array_equal(result, expected)

        result = algos.take_nd(arr, [0, 2, -1])
        assert result.dtype == np.object_

    def test_2d_float32(self):
        """float32 2-D take with out= buffers and default NaN fill."""
        arr = np.random.randn(4, 3).astype(np.float32)
        indexer = [0, 2, -1, 1, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, axis=0, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=0)
        expected[[2, 4], :] = np.nan
        tm.assert_almost_equal(result, expected)

        # this now accepts a float32! # test with float64 out buffer
        out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
        algos.take_nd(arr, indexer, out=out)  # it works!

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, axis=1, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=1)
        expected[:, [2, 4]] = np.nan
        tm.assert_almost_equal(result, expected)

    def test_2d_datetime64(self):
        """datetime64 take: missing slots become iNaT by default, or the
        given datetime when a fill_value is supplied."""
        # 2005/01/01 - 2006/01/01
        arr = np.random.randint(
            long(11045376), long(11360736), (5, 3)) * 100000000000
        arr = arr.view(dtype='datetime64[ns]')
        indexer = [0, 2, -1, 1, -1]

        # axis=0
        result = algos.take_nd(arr, indexer, axis=0)
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, axis=0, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=0)
        # iNaT is the sentinel "not a time" stored in the int64 payload.
        expected.view(np.int64)[[2, 4], :] = iNaT
        tm.assert_almost_equal(result, expected)

        result = algos.take_nd(arr, indexer, axis=0,
                               fill_value=datetime(2007, 1, 1))
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, out=result2, axis=0,
                      fill_value=datetime(2007, 1, 1))
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=0)
        expected[[2, 4], :] = datetime(2007, 1, 1)
        tm.assert_almost_equal(result, expected)

        # axis=1
        result = algos.take_nd(arr, indexer, axis=1)
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, axis=1, out=result2)
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=1)
        expected.view(np.int64)[:, [2, 4]] = iNaT
        tm.assert_almost_equal(result, expected)

        result = algos.take_nd(arr, indexer, axis=1,
                               fill_value=datetime(2007, 1, 1))
        result2 = np.empty_like(result)
        algos.take_nd(arr, indexer, out=result2, axis=1,
                      fill_value=datetime(2007, 1, 1))
        tm.assert_almost_equal(result, result2)

        expected = arr.take(indexer, axis=1)
        expected[:, [2, 4]] = datetime(2007, 1, 1)
        tm.assert_almost_equal(result, expected)

    def test_take_axis_0(self):
        """Public algos.take: -1 wraps around unless allow_fill=True."""
        arr = np.arange(12).reshape(4, 3)
        result = algos.take(arr, [0, -1])
        expected = np.array([[0, 1, 2], [9, 10, 11]])
        tm.assert_numpy_array_equal(result, expected)

        # allow_fill=True
        result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)
        expected = np.array([[0, 1, 2], [0, 0, 0]])
        tm.assert_numpy_array_equal(result, expected)

    def test_take_axis_1(self):
        """Same as test_take_axis_0 but along the column axis."""
        arr = np.arange(12).reshape(4, 3)
        result = algos.take(arr, [0, -1], axis=1)
        expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])
        tm.assert_numpy_array_equal(result, expected)

        # allow_fill=True
        result = algos.take(arr, [0, -1], axis=1, allow_fill=True,
                            fill_value=0)
        expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])
        tm.assert_numpy_array_equal(result, expected)
class TestExtensionTake(object):
    # The take method found in pd.api.extensions

    def test_bounds_check_large(self):
        """Out-of-bounds positive indices raise regardless of allow_fill."""
        arr = np.array([1, 2])
        with pytest.raises(IndexError):
            algos.take(arr, [2, 3], allow_fill=True)

        with pytest.raises(IndexError):
            algos.take(arr, [2, 3], allow_fill=False)

    def test_bounds_check_small(self):
        """With allow_fill=True only -1 marks missing; -2 is invalid.
        Without it, negative indices wrap around as in NumPy."""
        arr = np.array([1, 2, 3], dtype=np.int64)
        indexer = [0, -1, -2]
        with pytest.raises(ValueError):
            algos.take(arr, indexer, allow_fill=True)

        result = algos.take(arr, indexer)
        expected = np.array([1, 3, 2], dtype=np.int64)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('allow_fill', [True, False])
    def test_take_empty(self, allow_fill):
        """Empty indexer on empty data is fine; any index is out of bounds."""
        arr = np.array([], dtype=np.int64)
        # empty take is ok
        result = algos.take(arr, [], allow_fill=allow_fill)
        tm.assert_numpy_array_equal(arr, result)

        with pytest.raises(IndexError):
            algos.take(arr, [0], allow_fill=allow_fill)

    def test_take_na_empty(self):
        """All-missing take from empty data yields pure fill values."""
        result = algos.take(np.array([]), [-1, -1], allow_fill=True,
                            fill_value=0.0)
        expected = np.array([0., 0.])
        tm.assert_numpy_array_equal(result, expected)

    def test_take_coerces_list(self):
        """A plain Python list input is coerced to an ndarray first."""
        arr = [1, 2, 3]
        result = algos.take(arr, [0, 0])
        expected = np.array([1, 1])
        tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/tests/test_mlab.py | 3 | 1722 | import numpy as np
import matplotlib.mlab as mlab
import tempfile
from nose.tools import raises
def test_colinear_pca():
    # With colinear input columns, the trailing principal components must
    # carry (numerically) zero variance and zero projected coordinates.
    a = mlab.PCA._get_colinear()
    pca = mlab.PCA(a)

    assert(np.allclose(pca.fracs[2:], 0.))
    assert(np.allclose(pca.Y[:, 2:], 0.))
def test_recarray_csv_roundtrip():
    """Write a recarray with rec2csv and read it back with csv2rec; the
    float fields must round-trip to within floating-point tolerance."""
    # ``np.float`` (an alias of the builtin ``float``) was deprecated in
    # NumPy 1.20 and removed in 1.24 -- use the builtin directly; the
    # resulting dtype is identical.
    expected = np.recarray((99,),
                           [('x', float), ('y', float), ('t', float)])
    # Cover large-negative, large-positive and small-magnitude values.
    expected['x'][:] = np.linspace(-1e9, -1, 99)
    expected['y'][:] = np.linspace(1, 1e9, 99)
    expected['t'][:] = np.linspace(0, 0.01, 99)

    fd = tempfile.TemporaryFile(suffix='csv')
    mlab.rec2csv(expected, fd)
    fd.seek(0)
    actual = mlab.csv2rec(fd)
    fd.close()

    assert np.allclose(expected['x'], actual['x'])
    assert np.allclose(expected['y'], actual['y'])
    assert np.allclose(expected['t'], actual['t'])
@raises(ValueError)
def test_rec2csv_bad_shape():
    """rec2csv must reject recarrays with more than one dimension."""
    # ``np.float`` alias removed in NumPy 1.24 -> builtin float (same dtype).
    bad = np.recarray((99, 4), [('x', float), ('y', float)])
    fd = tempfile.TemporaryFile(suffix='csv')

    # the bad recarray should trigger a ValueError for having ndim > 1.
    mlab.rec2csv(bad, fd)
def test_prctile():
    """mlab.prctile matches np.median at the 50th percentile and matches
    known reference values, both vectorized and one percentile at a time."""
    # The 50th percentile is the median, for odd and even lengths alike.
    for sample in ([1, 2, 3], [1, 2, 3, 4]):
        assert mlab.prctile(sample, 50) == np.median(sample)

    # derived from email sent by jason-sage to MPL-user on 20090914
    ob1 = [1, 1, 2, 2, 1, 2, 4, 3, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 7, 6,
           4, 5, 5]
    p = [0, 75, 100]
    expected = [1, 5.5, 9]

    # test vectorized
    assert np.allclose(expected, mlab.prctile(ob1, p))

    # test scalar
    for pct, want in zip(p, expected):
        assert np.allclose(want, mlab.prctile(ob1, pct))
| gpl-2.0 |
warren-oneill/powerline | tests/test_product_conversion.py | 2 | 3427 | import pandas as pd
import numpy as np
from unittest import TestCase
from powerline.utils.hour_quarter_hour_converter import \
convert_between_h_and_qh
from powerline.exchanges.epex_exchange import EpexExchange
__author__ = "Max"
class TestProductConversion(TestCase):
    """
    Testing the utility function for history conversion via mock histories
    """

    def setUp(self):
        # Column labels for hourly (24) and quarter-hourly (96) products.
        exchange = EpexExchange()
        self.hourly_products = exchange.products['hour']
        self.quarterly_products = exchange.products['qh']

    def test_conversion_hourly_to_quarterly(self):
        """Each hourly value must be repeated into its four quarter-hours."""
        hourly_data = np.array([range(0, 24), range(24, 48), range(48, 72)])
        hourly_history = pd.DataFrame(hourly_data,
                                      columns=self.hourly_products,
                                      index=pd.date_range('2015-01-01',
                                                          '2015-01-03'))
        # Hour h expands to the quadruple (h, h, h, h).
        quarterly_row_0 = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
                                    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                                    6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9,
                                    9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11,
                                    12, 12, 12, 12, 13, 13, 13, 13,
                                    14, 14, 14, 14, 15, 15, 15, 15,
                                    16, 16, 16, 16, 17, 17, 17, 17,
                                    18, 18, 18, 18, 19, 19, 19, 19,
                                    20, 20, 20, 20, 21, 21, 21, 21,
                                    22, 22, 22, 22, 23, 23, 23, 23])
        quarterly_row_1 = quarterly_row_0 + 24
        quarterly_row_2 = quarterly_row_0 + 48
        quarterly_data = np.array([quarterly_row_0, quarterly_row_1,
                                   quarterly_row_2])
        expected_output = pd.DataFrame(quarterly_data,
                                       columns=self.quarterly_products,
                                       index=pd.date_range('2015-01-01',
                                                           '2015-01-03'))
        observed_output = convert_between_h_and_qh(hourly_history)
        self.assertTrue(observed_output.equals(expected_output))

    def test_conversion_quarterly_to_hourly(self):
        """Each hour must be the mean of its four quarter-hour values."""
        quarterly_data = np.array([range(0, 96), range(96, 192), range(192,
                                                                       288)])
        quarterly_history = pd.DataFrame(quarterly_data,
                                         columns=self.quarterly_products,
                                         index=pd.date_range('2015-01-01',
                                                             '2015-01-03'))
        # mean of quarters (4h, 4h+1, 4h+2, 4h+3) == 4h + 1.5
        hourly_data = np.array([range(0, 24), range(24, 48), range(48, 72)]) \
            * 4 + 1.5
        expected_output = pd.DataFrame(hourly_data,
                                       columns=self.hourly_products,
                                       index=pd.date_range('2015-01-01',
                                                           '2015-01-03'))
        observed_output = convert_between_h_and_qh(quarterly_history)
        self.assertTrue(observed_output.equals(expected_output))

    def test_no_history(self):
        # A frame that is neither 24 nor 96 columns wide must be rejected.
        no_history = pd.DataFrame(np.random.randn(3, 3))
        self.assertRaises(ValueError, convert_between_h_and_qh, no_history)
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/datasets/samples_generator.py | 4 | 57684 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    if dimensions > 30:
        # sample_without_replacement enumerates the 2**dimensions corners
        # with an integer, so peel off the extra dimensions as independent
        # random bits and recurse on a 30-bit block to avoid overflow.
        return np.hstack([rng.randint(2, size=(samples, dimensions - 30)),
                          _generate_hypercube(samples, 30, rng)])
    out = sample_without_replacement(2 ** dimensions, samples,
                                     random_state=rng).astype(dtype='>u4',
                                                              copy=False)
    # Unpack each big-endian uint32 into its 32 bits and keep the low
    # `dimensions` bits as the binary coordinates of the sample.
    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
    return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length `2*class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged. Larger
        values introduce noise in the labels and make the classification
        task harder.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    if weights and len(weights) == (n_classes - 1):
        weights = weights + [1.0 - sum(weights)]

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    # FIX: ``np.int`` (an alias of the builtin ``int``) was deprecated in
    # NumPy 1.20 and removed in 1.24; the builtin gives the same dtype.
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix

        .. versionadded:: 0.17
           parameter to allow *sparse* output.

    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.

    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.

    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.

    """
    generator = check_random_state(random_state)
    # Random (normalized) class prior p_c and per-class word distributions.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # Draw one (word indices, label set) pair per the docstring process.
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y

    # Accumulate word counts in CSR (indices/indptr) form.
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    # Collapse repeated word indices into per-word counts.
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rs = check_random_state(random_state)

    shape = (n_samples, 10)
    X = rs.normal(size=shape).reshape(shape)
    # 9.34 is the median of a chi-squared(10) variable, splitting the
    # classes roughly 50/50; labels are +1 / -1.
    y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
    y[y == 0.0] = -1.0

    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input set is either well conditioned (the default) or has a low
    rank-fat tail singular profile; see :func:`make_low_rank_matrix` for
    details on the latter.

    The output is produced by applying a (possibly biased) random linear
    model with `n_informative` non-zero regressors to the generated input,
    plus centered gaussian noise of adjustable scale.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain
            most of the input data by linear combinations. Using this kind
            of singular spectrum in the input allows the generator to
            reproduce the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Well conditioned gaussian input set.
        X = generator.randn(n_samples, n_features)
    else:
        # Low rank input set with a fat singular-value tail.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Ground-truth model: only the first n_informative coefficients are
    # non-zero, so the remaining features are uncorrelated with y and can be
    # pruned by sparsifying regularizers such as L1 or elastic net.
    true_coefs = np.zeros((n_features, n_targets))
    true_coefs[:n_informative, :] = 100 * generator.rand(n_informative,
                                                         n_targets)

    y = np.dot(X, true_coefs) + bias

    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    if shuffle:
        # Permute the samples first, then the features; the coefficient
        # vector follows the feature permutation so the linear relation
        # between X and y is preserved.
        X, y = util_shuffle(X, y, random_state=generator)

        feature_idx = np.arange(n_features)
        generator.shuffle(feature_idx)
        X[:, :] = X[:, feature_idx]
        true_coefs = true_coefs[feature_idx]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(true_coefs)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated. For odd values, the outer
        circle receives ``n_samples // 2`` points and the inner circle the
        remainder.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    # Split the sample budget between the two circles so that exactly
    # n_samples points are produced even when n_samples is odd (the old
    # implementation silently dropped one sample in that case).
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)
    # endpoint=False so that the first point is not duplicated as the last
    # one (the angles 0 and 2*pi map to the same location).
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    # Class 0: upper half circle.  Class 1: a mirrored half circle shifted
    # right by 1 and down by 0.5 so the two moons interleave.
    theta_out = np.linspace(0, np.pi, n_samples_out)
    theta_in = np.linspace(0, np.pi, n_samples_in)
    outer_circ_x = np.cos(theta_out)
    outer_circ_y = np.sin(theta_out)
    inner_circ_x = 1 - np.cos(theta_in)
    inner_circ_y = 1 - np.sin(theta_in) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly in center_box.
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    n_centers = centers.shape[0]

    # Broadcast a scalar std to one value per center.
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.ones(len(centers)) * cluster_std

    # Distribute samples as evenly as possible among the centers; the first
    # (n_samples % n_centers) clusters get one extra point.
    n_samples_per_center = [int(n_samples // n_centers)] * n_centers
    for i in range(n_samples % n_centers):
        n_samples_per_center[i] += 1

    X = []
    y = []
    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        X.append(centers[i] + generator.normal(scale=std,
                                               size=(n, n_features)))
        y += [i] * n
    X = np.concatenate(X)
    y = np.array(y)

    if shuffle:
        # permutation(n) draws from the stream exactly like arange+shuffle.
        order = generator.permutation(n_samples)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman \#1" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`.

    The number of features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features. Should be at least 5.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)
    X = generator.rand(n_samples, n_features)

    # Only the first five columns enter the response; the remaining
    # features are pure distractors.
    y = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
         + 20 * (X[:, 2] - 0.5) ** 2
         + 10 * X[:, 3]
         + 5 * X[:, 4]
         + noise * generator.randn(n_samples))
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#2" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Draw uniformly on [0, 1), then rescale each column onto the intervals
    # documented above (column 2 keeps its unit interval).
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    y = (X[:, 0] ** 2
         + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
        + noise * generator.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#3" regression problem

    This dataset is described in Friedman [1] and Breiman [2].

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Draw uniformly on [0, 1), then rescale each column onto the intervals
    # documented above (column 2 keeps its unit interval).
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] = X[:, 1] * (520 * np.pi) + 40 * np.pi
    X[:, 3] = X[:, 3] * 10 + 1

    y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
        + noise * generator.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:

     - gray level pictures of faces
     - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random orthonormal bases for the row and column spaces.
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Singular value profile: a bell-shaped "signal" part plus a slowly
    # decaying fat "noise" tail.
    singular_ind = np.arange(n, dtype=np.float64)
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = np.diag(low_rank + tail)

    # Reassemble X = U * S * V^T from the synthetic SVD factors.
    return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    n_components : int,
        number of components in the dictionary

    n_features : int
        number of features of the dataset to generate

    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).

    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).

    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # Dictionary with unit-l2-norm columns (atoms).
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # Sparse code: each column has exactly n_nonzero_coefs non-zero
    # gaussian entries at uniformly random positions.
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # Encode the signal.
    Y = np.dot(D, X)

    # Materialize the result as a tuple: under Python 3 a bare ``map``
    # object is a one-shot iterator that cannot be indexed or re-iterated,
    # which silently broke callers doing anything but a single unpacking.
    return tuple(map(np.squeeze, (Y, D, X)))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=10)
        The number of features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)
    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))

    # The deterministic part of y depends on the first four features only;
    # unit gaussian noise is added around it via the `loc` parameter.
    signal = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = generator.normal(loc=signal, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    generator = check_random_state(random_state)

    # A.T A is symmetric positive semi-definite; replacing its singular
    # values with random values in (1, 2) pushes them away from zero and
    # guarantees positive-definiteness.
    rand_mat = generator.rand(n_dim, n_dim)
    U, _, V = linalg.svd(np.dot(rand_mat.T, rand_mat))
    shifted_diag = 1.0 + np.diag(generator.rand(n_dim))
    return np.dot(np.dot(U, shifted_diag), V)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.

    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity.

    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1

    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.

    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    rng = check_random_state(random_state)

    # Start from a Cholesky factor with -1 on the diagonal, then sprinkle
    # a sparse set of off-diagonal coefficients into it.
    chol = -np.eye(dim)
    aux = rng.rand(dim, dim)
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * rng.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle so chol stays triangular.
    aux = np.tril(aux, k=-1)

    # Symmetrically permute rows/columns so the final SPD matrix carries
    # no structural asymmetry from the triangular construction.
    permutation = rng.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux

    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Rescale so every diagonal entry of the precision matrix is 1.
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
    """
    generator = check_random_state(random_state)

    # Main manifold coordinate: angles spanning 1.5 turns of the roll.
    t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
    x = t * np.cos(t)
    y = 21 * generator.rand(1, n_samples)  # height along the roll axis
    z = t * np.sin(t)

    X = np.vstack((x, y, z)) + noise * generator.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.

    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    generator = check_random_state(random_state)

    # Main manifold coordinate over (-1.5*pi, 1.5*pi).
    t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
    x = np.sin(t)
    y = 2.0 * generator.rand(1, n_samples)  # extent along the flat axis
    z = np.sign(t) * (np.cos(t) - 1)

    X = np.vstack((x, y, z)) + noise * generator.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    """Generate isotropic Gaussian and label samples by quantile

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.

    n_features : int, optional (default=2)
        The number of features for each sample.

    n_classes : int, optional (default=3)
        The number of classes

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Isotropic multivariate normal centered on `mean`.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order samples by squared distance from the mean, then cut the ordered
    # set into (roughly) equal-sized concentric shells, one per class.
    sq_dist = np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)
    X = X[np.argsort(sq_dist), :]

    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    """Independently permute the rows and the columns of a 2-d array.

    Returns the shuffled array together with the row and column
    permutations that were applied.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = generator.permutation(n_rows)
    col_idx = generator.permutation(n_cols)
    return data[row_idx][:, col_idx], row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer
        The number of biclusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))

    # Label i is repeated row_sizes[i] / col_sizes[i] times.
    row_labels = np.repeat(np.arange(n_clusters), row_sizes)
    col_labels = np.repeat(np.arange(n_clusters), col_sizes)

    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # NOTE(review): _shuffle re-seeds from the original ``random_state``
        # argument rather than from ``generator`` — preserved historical
        # behavior.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # Build the indicator matrices from list comprehensions: passing a bare
    # generator expression to np.vstack is deprecated in NumPy and raises
    # an error in recent releases.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.

    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.

    minval : int, optional (default=10)
        Minimum value of a bicluster.

    maxval : int, optional (default=100)
        Maximum value of a bicluster.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.

    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.

    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------

    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))
    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))

    # each (row cluster, column cluster) block gets its own constant value
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # FIX: np.vstack must be given a sequence, not a bare generator
    # expression -- generator input is deprecated since numpy 1.16 and an
    # error in recent releases. Materialize the indicator rows as lists.
    # The ordering is one indicator row per bicluster: row indicators repeat
    # each row label n_col_clusters times, column indicators cycle the
    # column labels for each row cluster.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| mit |
waqasbhatti/astrobase | astrobase/lcproc/catalogs.py | 2 | 57723 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# catalogs.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Feb 2019
'''
This contains functions to generate light curve catalogs from collections of
light curves.
'''
#############
## LOGGING ##
#############

import logging
from astrobase import log_sub, log_fmt, log_date_fmt

# flip this to True to get DEBUG-level messages from this module
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO

# module-level logger; all functions below log through the aliases defined
# at the bottom of this section
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)

LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os
import os.path
import glob
import shutil
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import numpy.random as npr
npr.seed(0xc0ffee)
import scipy.spatial as sps
import astropy.io.fits as pyfits
from astropy.wcs import WCS
from astropy.visualization import ZScaleInterval, LinearStretch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from tqdm import tqdm
TQDM = True
except Exception:
TQDM = False
pass
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def _dict_get(datadict, keylist):
return reduce(getitem, keylist, datadict)
############
## CONFIG ##
############

# default worker count for the parallel functions below: one per CPU core
NCPUS = mp.cpu_count()

# these translate filter operators given as strings to Python operators
# (used by filter_lclist when evaluating '<col>|<op>|<operand>' specs)
FILTEROPS = {'eq':'==',
             'gt':'>',
             'ge':'>=',
             'lt':'<',
             'le':'<=',
             'ne':'!='}
###################
## LOCAL IMPORTS ##
###################
from astrobase.plotbase import fits_finder_chart
from astrobase.cpserver.checkplotlist import checkplot_infokey_worker
from astrobase.lcproc import get_lcformat
#####################################################
## FUNCTIONS TO GENERATE OBJECT CATALOGS (LCLISTS) ##
#####################################################
def _lclist_parallel_worker(task):
    '''This is a parallel worker for makelclist.

    Parameters
    ----------

    task : tuple
        This is a tuple containing the following items:

        task[0] = lcf
        task[1] = columns
        task[2] = lcformat
        task[3] = lcformatdir
        task[4] = lcndetkey

    Returns
    -------

    dict or None
        This contains all of the info for the object processed in this LC read
        operation. If this fails, returns None

    '''

    lcf, columns, lcformat, lcformatdir, lcndetkey = task

    # get the bits needed for lcformat handling
    # NOTE: we re-import things in this worker function because sometimes
    # functions can't be pickled correctly for passing them to worker functions
    # in a processing pool
    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None

    # we store the full path of the light curve
    lcobjdict = {'lcfname':os.path.abspath(lcf)}

    try:

        # read the light curve in
        lcdict = readerfunc(lcf)

        # this should handle lists/tuples being returned by readerfunc
        # we assume that the first element is the actual lcdict
        # FIXME: figure out how to not need this assumption
        if ( (isinstance(lcdict, (list, tuple))) and
             (isinstance(lcdict[0], dict)) ):
            lcdict = lcdict[0]

        # insert all of the columns
        for colkey in columns:

            # a dotted key addresses a nested dict, e.g. 'objectinfo.ra'
            if '.' in colkey:
                getkey = colkey.split('.')
            else:
                getkey = [colkey]

            try:
                thiscolval = _dict_get(lcdict, getkey)
            except Exception:
                LOGWARNING('column %s does not exist for %s' %
                           (colkey, lcf))
                thiscolval = np.nan

            # update the lcobjdict with this value; only the last path
            # component is used as the output key
            lcobjdict[getkey[-1]] = thiscolval

    except Exception:

        LOGEXCEPTION('could not figure out columns for %s' % lcf)

        # insert all of the columns as nans
        for colkey in columns:

            if '.' in colkey:
                getkey = colkey.split('.')
            else:
                getkey = [colkey]

            thiscolval = np.nan

            # update the lclistdict with this value
            lcobjdict[getkey[-1]] = thiscolval

    # now get the actual ndets; this excludes nans and infs
    for dk in lcndetkey:

        try:

            if '.' in dk:
                getdk = dk.split('.')
            else:
                getdk = [dk]

            # count only finite measurements in this magnitude column
            ndetcol = _dict_get(lcdict, getdk)
            actualndets = ndetcol[np.isfinite(ndetcol)].size
            lcobjdict['%s.ndet' % getdk[-1]] = actualndets

        except Exception:
            lcobjdict['%s.ndet' % getdk[-1]] = np.nan

    return lcobjdict
def make_lclist(basedir,
                outfile,
                use_list_of_filenames=None,
                lcformat='hat-sql',
                lcformatdir=None,
                fileglob=None,
                recursive=True,
                columns=('objectid',
                         'objectinfo.ra',
                         'objectinfo.decl',
                         'objectinfo.ndet'),
                makecoordindex=('objectinfo.ra','objectinfo.decl'),
                field_fitsfile=None,
                field_wcsfrom=None,
                field_scale=ZScaleInterval(),
                field_stretch=LinearStretch(),
                field_colormap=plt.cm.gray_r,
                field_findersize=None,
                field_pltopts={'marker':'o',
                               'markersize':10.0,
                               'markerfacecolor':'none',
                               'markeredgewidth':2.0,
                               'markeredgecolor':'red'},
                field_grid=False,
                field_gridcolor='k',
                field_zoomcontain=True,
                maxlcs=None,
                nworkers=NCPUS):
    '''This generates a light curve catalog for all light curves in a directory.

    Given a base directory where all the files are, and a light curve format,
    this will find all light curves, pull out the keys in each lcdict requested
    in the `columns` kwarg for each object, and write them to the requested
    output pickle file. These keys should be pointers to scalar values
    (i.e. something like `objectinfo.ra` is OK, but something like 'times' won't
    work because it's a vector).

    Generally, this works with light curve reading functions that produce
    lcdicts as detailed in the docstring for `lcproc.register_lcformat`. Once
    you've registered your light curve reader functions using the
    `lcproc.register_lcformat` function, pass in the `formatkey` associated with
    your light curve format, and this function will be able to read all light
    curves in that format as well as the object information stored in their
    `objectinfo` dict.

    Parameters
    ----------

    basedir : str or list of str
        If this is a str, points to a single directory to search for light
        curves. If this is a list of str, it must be a list of directories to
        search for light curves. All of these will be searched to find light
        curve files matching either your light curve format's default fileglob
        (when you registered your LC format), or a specific fileglob that you
        can pass in using the `fileglob` kwargh here. If the `recursive` kwarg
        is set, the provided directories will be searched recursively.

        If `use_list_of_filenames` is not None, it will override this argument
        and the function will take those light curves as the list of files it
        must process instead of whatever is specified in `basedir`.

    outfile : str
        This is the name of the output file to write. This will be a pickle
        file, so a good convention to use for this name is something like
        'my-lightcurve-catalog.pkl'.

    use_list_of_filenames : list of str or None
        Use this kwarg to override whatever is provided in `basedir` and
        directly pass in a list of light curve files to process. This can speed
        up this function by a lot because no searches on disk will be performed
        to find light curve files matching `basedir` and `fileglob`.

    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.

    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.

    fileglob : str or None
        If provided, is a string that is a valid UNIX filename glob. Used to
        override the default fileglob for this LC format when searching for
        light curve files in `basedir`.

    recursive : bool
        If True, the directories specified in `basedir` will be searched
        recursively for all light curve files that match the default fileglob
        for this LC format or a specific one provided in `fileglob`.

    columns : list of str
        This is a list of keys in the lcdict produced by your light curve reader
        function that contain object information, which will be extracted and
        put into the output light curve catalog. It's highly recommended that
        your LC reader function produce a lcdict that contains at least the
        default keys shown here.

        The lcdict keys to extract are specified by using an address scheme:

        - First level dict keys can be specified directly:
          e.g., 'objectid' will extract lcdict['objectid']

        - Keys at other levels can be specified by using a period to indicate
          the level:

          - e.g., 'objectinfo.ra' will extract lcdict['objectinfo']['ra']
          - e.g., 'objectinfo.varinfo.features.stetsonj' will extract
            lcdict['objectinfo']['varinfo']['features']['stetsonj']

    makecoordindex : list of two str or None
        This is used to specify which lcdict keys contain the right ascension
        and declination coordinates for this object. If these are provided, the
        output light curve catalog will have a kdtree built on all object
        coordinates, which enables fast spatial searches and cross-matching to
        external catalogs by `checkplot` and `lcproc` functions.

    field_fitsfile : str or None
        If this is not None, it should be the path to a FITS image containing
        the objects these light curves are for. If this is provided,
        `make_lclist` will use the WCS information in the FITS itself if
        `field_wcsfrom` is None (or from a WCS header file pointed to by
        `field_wcsfrom`) to obtain x and y pixel coordinates for all of the
        objects in the field. A finder chart will also be made using
        `astrobase.plotbase.fits_finder_chart` using the corresponding
        `field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
        `_grid`, and `_gridcolors` kwargs for that function, reproduced here to
        enable customization of the finder chart plot.

    field_wcsfrom : str or None
        If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
        be taken from the FITS header of `fitsfile`. If this is not None, it
        must be a FITS or similar file that contains a WCS header in its first
        extension.

    field_scale : astropy.visualization.Interval object
        `scale` sets the normalization for the FITS pixel values. This is an
        astropy.visualization Interval object.
        See http://docs.astropy.org/en/stable/visualization/normalization.html
        for details on `scale` and `stretch` objects.

    field_stretch : astropy.visualization.Stretch object
        `stretch` sets the stretch function for mapping FITS pixel values to
        output pixel values. This is an astropy.visualization Stretch object.
        See http://docs.astropy.org/en/stable/visualization/normalization.html
        for details on `scale` and `stretch` objects.

    field_colormap : matplotlib Colormap object
        `colormap` is a matplotlib color map object to use for the output image.

    field_findersize : None or tuple of two ints
        If `findersize` is None, the output image size will be set by the NAXIS1
        and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
        `findersize` must be a tuple with the intended x and y size of the image
        in inches (all output images will use a DPI = 100).

    field_pltopts : dict
        `field_pltopts` controls how the overlay points will be plotted. This
        a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
        e.g. 'markersize', 'markerfacecolor', etc. The default options make red
        outline circles at the location of each object in the overlay.

    field_grid : bool
        `grid` sets if a grid will be made on the output image.

    field_gridcolor : str
        `gridcolor` sets the color of the grid lines. This is a usual matplotib
        color spec string.

    field_zoomcontain : bool
        `field_zoomcontain` controls if the finder chart will be zoomed to
        just contain the overlayed points. Everything outside the footprint of
        these points will be discarded.

    maxlcs : int or None
        This sets how many light curves to process in the input LC list
        generated by searching for LCs in `basedir` or in the list provided as
        `use_list_of_filenames`.

    nworkers : int
        This sets the number of parallel workers to launch to collect
        information from the light curves.

    Returns
    -------

    str
        Returns the path to the generated light curve catalog pickle file.

    '''

    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None

    if not fileglob:
        fileglob = dfileglob

    # this is to get the actual ndet
    # set to the magnitudes column
    lcndetkey = dmagcols

    if isinstance(use_list_of_filenames, list):

        matching = use_list_of_filenames

    else:

        # handle the case where basedir is a list of directories
        if isinstance(basedir, list):

            matching = []

            for bdir in basedir:

                # now find the files
                LOGINFO('searching for %s light curves in %s ...' % (lcformat,
                                                                     bdir))

                if recursive is False:
                    matching.extend(glob.glob(os.path.join(bdir, fileglob)))

                else:
                    matching.extend(glob.glob(os.path.join(bdir,
                                                           '**',
                                                           fileglob),
                                              recursive=True))

        # otherwise, handle the usual case of one basedir to search in
        else:

            # now find the files
            LOGINFO('searching for %s light curves in %s ...' %
                    (lcformat, basedir))

            if recursive is False:
                matching = glob.glob(os.path.join(basedir, fileglob))

            else:
                matching = glob.glob(os.path.join(basedir,
                                                  '**',
                                                  fileglob),recursive=True)

    #
    # now that we have all the files, process them
    #
    if matching and len(matching) > 0:

        LOGINFO('found %s light curves' % len(matching))

        # cut down matching to maxlcs
        if maxlcs:
            matching = matching[:maxlcs]

        # prepare the output dict
        lclistdict = {
            'basedir':basedir,
            'lcformat':lcformat,
            'fileglob':fileglob,
            'recursive':recursive,
            'columns':columns,
            'makecoordindex':makecoordindex,
            'nfiles':len(matching),
            'objects': {
            }
        }

        # columns that will always be present in the output lclistdict
        derefcols = ['lcfname']
        derefcols.extend(['%s.ndet' % x.split('.')[-1] for x in lcndetkey])

        for dc in derefcols:
            lclistdict['objects'][dc] = []

        # fill in the rest of the lclist columns from the columns kwarg
        for col in columns:

            # dereference the column
            thiscol = col.split('.')
            thiscol = thiscol[-1]

            lclistdict['objects'][thiscol] = []
            derefcols.append(thiscol)

        # start collecting info
        LOGINFO('collecting light curve info...')

        tasks = [(x, columns, lcformat, lcformatdir, lcndetkey)
                 for x in matching]

        with ProcessPoolExecutor(max_workers=nworkers) as executor:
            results = executor.map(_lclist_parallel_worker, tasks)
            results = list(results)

        # update the columns in the overall dict from the results of the
        # parallel map
        for result in results:
            for xcol in derefcols:
                lclistdict['objects'][xcol].append(result[xcol])

        # NOTE(review): redundant -- the `with` block above already shut the
        # pool down; kept as-is
        executor.shutdown()

        # done with collecting info
        # turn all of the lists in the lclistdict into arrays
        for col in lclistdict['objects']:
            lclistdict['objects'][col] = np.array(lclistdict['objects'][col])

        # handle duplicate objectids with different light curves
        uniques, counts = np.unique(lclistdict['objects']['objectid'],
                                    return_counts=True)

        duplicated_objectids = uniques[counts > 1]

        if duplicated_objectids.size > 0:

            # redo the objectid array so it has a bit larger dtype so the extra
            # tag can fit into the field
            dt = lclistdict['objects']['objectid'].dtype.str
            dt = '<U%s' % (
                int(dt.replace('<','').replace('U','').replace('S','')) + 3
            )
            lclistdict['objects']['objectid'] = np.array(
                lclistdict['objects']['objectid'],
                dtype=dt
            )

            for objid in duplicated_objectids:

                objid_inds = np.where(
                    lclistdict['objects']['objectid'] == objid
                )

                # mark the duplicates, assume the first instance is the actual
                # one
                for ncounter, nind in enumerate(objid_inds[0][1:]):
                    lclistdict['objects']['objectid'][nind] = '%s-%s' % (
                        lclistdict['objects']['objectid'][nind],
                        ncounter+2
                    )
                    LOGWARNING(
                        'tagging duplicated instance %s of objectid: '
                        '%s as %s-%s, lightcurve: %s' %
                        (ncounter+2, objid, objid, ncounter+2,
                         lclistdict['objects']['lcfname'][nind])
                    )

        # if we're supposed to make a spatial index, do so
        if (makecoordindex and
            isinstance(makecoordindex, (list, tuple)) and
            len(makecoordindex) == 2):

            try:

                # deref the column names
                racol, declcol = makecoordindex
                racol = racol.split('.')[-1]
                declcol = declcol.split('.')[-1]

                # get the ras and decls
                objra, objdecl = (lclistdict['objects'][racol],
                                  lclistdict['objects'][declcol])

                # get the xyz unit vectors from ra,decl
                # since i had to remind myself:
                # https://en.wikipedia.org/wiki/Equatorial_coordinate_system
                cosdecl = np.cos(np.radians(objdecl))
                sindecl = np.sin(np.radians(objdecl))
                cosra = np.cos(np.radians(objra))
                sinra = np.sin(np.radians(objra))
                xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))

                # generate the kdtree
                kdt = sps.cKDTree(xyz,copy_data=True)

                # put the tree into the dict
                lclistdict['kdtree'] = kdt

                LOGINFO('kdtree generated for (ra, decl): (%s, %s)' %
                        (makecoordindex[0], makecoordindex[1]))

            except Exception:
                LOGEXCEPTION('could not make kdtree for (ra, decl): (%s, %s)' %
                             (makecoordindex[0], makecoordindex[1]))
                raise

        # generate the xy pairs if fieldfits is not None
        if field_fitsfile and os.path.exists(field_fitsfile):

            # read in the FITS file
            if field_wcsfrom is None:

                hdulist = pyfits.open(field_fitsfile)
                hdr = hdulist[0].header
                hdulist.close()

                w = WCS(hdr)
                wcsok = True

            elif os.path.exists(field_wcsfrom):

                w = WCS(field_wcsfrom)
                wcsok = True

            else:

                LOGERROR('could not determine WCS info for input FITS: %s' %
                         field_fitsfile)
                wcsok = False

            if wcsok:

                # first, transform the ra/decl to x/y and put these in the
                # lclist output dict
                # NOTE(review): `objra`/`objdecl` are only bound inside the
                # `makecoordindex` branch above; calling this function with
                # makecoordindex=None and field_fitsfile set would raise a
                # NameError here -- confirm intended usage
                radecl = np.column_stack((objra, objdecl))
                lclistdict['objects']['framexy'] = w.all_world2pix(
                    radecl,
                    1
                )

                # next, we'll make a PNG plot for the finder
                finder_outfile = os.path.join(
                    os.path.dirname(outfile),
                    os.path.splitext(os.path.basename(outfile))[0] + '.png'
                )

                finder_png = fits_finder_chart(
                    field_fitsfile,
                    finder_outfile,
                    wcsfrom=field_wcsfrom,
                    scale=field_scale,
                    stretch=field_stretch,
                    colormap=field_colormap,
                    findersize=field_findersize,
                    overlay_ra=objra,
                    overlay_decl=objdecl,
                    overlay_pltopts=field_pltopts,
                    overlay_zoomcontain=field_zoomcontain,
                    grid=field_grid,
                    gridcolor=field_gridcolor
                )

                if finder_png is not None:
                    LOGINFO('generated a finder PNG '
                            'with an object position overlay '
                            'for this LC list: %s' % finder_png)

        # write the pickle
        with open(outfile,'wb') as outfd:
            pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)

        LOGINFO('done. LC info -> %s' % outfile)
        return outfile

    else:

        LOGERROR('no files found in %s matching %s' % (basedir, fileglob))
        return None
def filter_lclist(lc_catalog,
                  objectidcol='objectid',
                  racol='ra',
                  declcol='decl',
                  xmatchexternal=None,
                  xmatchdistarcsec=3.0,
                  externalcolnums=(0,1,2),
                  externalcolnames=('objectid','ra','decl'),
                  externalcoldtypes='U20,f8,f8',
                  externalcolsep=None,
                  externalcommentchar='#',
                  conesearch=None,
                  conesearchworkers=1,
                  columnfilters=None,
                  field_fitsfile=None,
                  field_wcsfrom=None,
                  field_scale=ZScaleInterval(),
                  field_stretch=LinearStretch(),
                  field_colormap=plt.cm.gray_r,
                  field_findersize=None,
                  field_pltopts={'marker':'o',
                                 'markersize':10.0,
                                 'markerfacecolor':'none',
                                 'markeredgewidth':2.0,
                                 'markeredgecolor':'red'},
                  field_grid=False,
                  field_gridcolor='k',
                  field_zoomcontain=True,
                  copylcsto=None):
    '''This is used to perform cone-search, cross-match, and column-filter
    operations on a light curve catalog generated by `make_lclist`.

    Uses the output of `make_lclist` above. This function returns a list of
    light curves matching various criteria specified by the `xmatchexternal`,
    `conesearch`, and `columnfilters kwargs`. Use this function to generate
    input lists for other lcproc functions,
    e.g. `lcproc.lcvfeatures.parallel_varfeatures`,
    `lcproc.periodfinding.parallel_pf`, and `lcproc.lcbin.parallel_timebin`,
    among others.

    The operations are applied in this order if more than one is specified:
    `xmatchexternal` -> `conesearch` -> `columnfilters`. All results from these
    operations are joined using a logical AND operation.

    Parameters
    ----------

    lc_catalog : str
        The path to the light curve catalog pickle file produced by
        `make_lclist`.

    objectidcol : str
        This is the name of the object ID column in the light curve catalog.

    racol : str
        This is the name of the RA column in the light curve catalog.

    declcol : str
        This is the name of the Dec column in the light curve catalog.

    xmatchexternal : str or None
        If provided, this is the filename of a text file containing objectids,
        ras and decs to match the objects in the light curve catalog to by their
        positions.

    xmatchdistarcsec : float
        This is the distance in arcseconds to use when cross-matching to the
        external catalog in `xmatchexternal`.

    externalcolnums : sequence of int
        This a list of the zero-indexed column numbers of columns to extract
        from the external catalog file.

    externalcolnames : sequence of str
        This is a list of names of columns that will be extracted from the
        external catalog file. This is the same length as
        `externalcolnums`. These must contain the names provided as the
        `objectid`, `ra`, and `decl` column names so this function knows which
        column numbers correspond to those columns and can use them to set up
        the cross-match.

    externalcoldtypes : str
        This is a CSV string containing numpy dtype definitions for all columns
        listed to extract from the external catalog file. The number of dtype
        definitions should be equal to the number of columns to extract.

    externalcolsep : str or None
        The column separator to use when extracting columns from the external
        catalog file. If None, any whitespace between columns is used as the
        separator.

    externalcommentchar : str
        The character indicating that a line in the external catalog file is to
        be ignored.

    conesearch : list of float
        This is used to specify cone-search parameters. It should be a three
        element list:

        [center_ra_deg, center_decl_deg, search_radius_deg]

    conesearchworkers : int
        The number of parallel workers to launch for the cone-search operation.

    columnfilters : list of str
        This is a list of strings indicating any filters to apply on each column
        in the light curve catalog. All column filters are applied in the
        specified sequence and are combined with a logical AND operator. The
        format of each filter string should be:

        '<lc_catalog column>|<operator>|<operand>'

        where:

        - <lc_catalog column> is a column in the lc_catalog pickle file

        - <operator> is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which
          correspond to the usual operators: <, >, <=, >=, ==, != respectively.

        - <operand> is a float, int, or string.

    field_fitsfile : str or None
        If this is not None, it should be the path to a FITS image containing
        the objects these light curves are for. If this is provided,
        `make_lclist` will use the WCS information in the FITS itself if
        `field_wcsfrom` is None (or from a WCS header file pointed to by
        `field_wcsfrom`) to obtain x and y pixel coordinates for all of the
        objects in the field. A finder chart will also be made using
        `astrobase.plotbase.fits_finder_chart` using the corresponding
        `field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
        `_grid`, and `_gridcolors` kwargs for that function, reproduced here to
        enable customization of the finder chart plot.

    field_wcsfrom : str or None
        If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
        be taken from the FITS header of `fitsfile`. If this is not None, it
        must be a FITS or similar file that contains a WCS header in its first
        extension.

    field_scale : astropy.visualization.Interval object
        `scale` sets the normalization for the FITS pixel values. This is an
        astropy.visualization Interval object.
        See http://docs.astropy.org/en/stable/visualization/normalization.html
        for details on `scale` and `stretch` objects.

    field_stretch : astropy.visualization.Stretch object
        `stretch` sets the stretch function for mapping FITS pixel values to
        output pixel values. This is an astropy.visualization Stretch object.
        See http://docs.astropy.org/en/stable/visualization/normalization.html
        for details on `scale` and `stretch` objects.

    field_colormap : matplotlib Colormap object
        `colormap` is a matplotlib color map object to use for the output image.

    field_findersize : None or tuple of two ints
        If `findersize` is None, the output image size will be set by the NAXIS1
        and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
        `findersize` must be a tuple with the intended x and y size of the image
        in inches (all output images will use a DPI = 100).

    field_pltopts : dict
        `field_pltopts` controls how the overlay points will be plotted. This
        a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
        e.g. 'markersize', 'markerfacecolor', etc. The default options make red
        outline circles at the location of each object in the overlay.

    field_grid : bool
        `grid` sets if a grid will be made on the output image.

    field_gridcolor : str
        `gridcolor` sets the color of the grid lines. This is a usual matplotib
        color spec string.

    field_zoomcontain : bool
        `field_zoomcontain` controls if the finder chart will be zoomed to
        just contain the overlayed points. Everything outside the footprint of
        these points will be discarded.

    copylcsto : str
        If this is provided, it is interpreted as a directory target to copy
        all the light curves that match the specified conditions.

    Returns
    -------

    tuple
        Returns a two elem tuple: (matching_object_lcfiles, matching_objectids)
        if conesearch and/or column filters are used. If `xmatchexternal` is
        also used, a three-elem tuple is returned: (matching_object_lcfiles,
        matching_objectids, extcat_matched_objectids).

    '''

    with open(lc_catalog,'rb') as infd:
        lclist = pickle.load(infd)

    # generate numpy arrays of the matching object indexes. we do it this way so
    # we can AND everything at the end, instead of having to look up the objects
    # at these indices and running the columnfilter on them
    # FIX: use the builtin `bool` here; the `np.bool` alias is deprecated
    # since numpy 1.20 and was removed in numpy 1.24
    xmatch_matching_index = np.full_like(lclist['objects'][objectidcol],
                                         False,
                                         dtype=bool)
    conesearch_matching_index = np.full_like(lclist['objects'][objectidcol],
                                             False,
                                             dtype=bool)

    # do the xmatch first
    ext_matches = []
    ext_matching_objects = []

    if (xmatchexternal and
        isinstance(xmatchexternal, str) and
        os.path.exists(xmatchexternal)):

        try:

            # read in the external file
            extcat = np.genfromtxt(xmatchexternal,
                                   usecols=externalcolnums,
                                   delimiter=externalcolsep,
                                   names=externalcolnames,
                                   dtype=externalcoldtypes,
                                   comments=externalcommentchar)

            ext_cosdecl = np.cos(np.radians(extcat['decl']))
            ext_sindecl = np.sin(np.radians(extcat['decl']))
            ext_cosra = np.cos(np.radians(extcat['ra']))
            ext_sinra = np.sin(np.radians(extcat['ra']))

            ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
                                       ext_sinra*ext_cosdecl,
                                       ext_sindecl))
            # chord length on the unit sphere equivalent to the match radius
            ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0)

            # get our kdtree
            our_kdt = lclist['kdtree']

            # get the external kdtree
            ext_kdt = sps.cKDTree(ext_xyz)

            # do a query_ball_tree
            extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)

            for extind, mind in enumerate(extkd_matchinds):
                if len(mind) > 0:
                    ext_matches.append(mind[0])
                    # get the whole matching row for the ext objects recarray
                    ext_matching_objects.append(extcat[extind])

            ext_matches = np.array(ext_matches)

            if ext_matches.size > 0:

                # update the xmatch_matching_index
                xmatch_matching_index[ext_matches] = True

                LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' %
                        (xmatchexternal, xmatchdistarcsec, ext_matches.size))

            else:

                LOGERROR("xmatch: no objects were cross-matched to external "
                         "catalog spec: %s, can't continue" % xmatchexternal)
                return None, None, None

        except Exception:

            LOGEXCEPTION('could not match to external catalog spec: %s' %
                         repr(xmatchexternal))
            raise

    # do the cone search next
    if (conesearch and
        isinstance(conesearch, (list, tuple)) and
        len(conesearch) == 3):

        try:

            racenter, declcenter, searchradius = conesearch
            cosdecl = np.cos(np.radians(declcenter))
            sindecl = np.sin(np.radians(declcenter))
            cosra = np.cos(np.radians(racenter))
            sinra = np.sin(np.radians(racenter))

            # this is the search distance in xyz unit vectors
            xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0)

            # get the kdtree
            our_kdt = lclist['kdtree']

            # look up the coordinates
            # NOTE(review): scipy >= 1.6 renames the `n_jobs` kwarg of
            # query_ball_point to `workers`; confirm the target scipy version
            kdtindices = our_kdt.query_ball_point([cosra*cosdecl,
                                                   sinra*cosdecl,
                                                   sindecl],
                                                  xyzdist,
                                                  n_jobs=conesearchworkers)

            if kdtindices and len(kdtindices) > 0:

                LOGINFO('cone search: objects within %.4f deg '
                        'of (%.3f, %.3f): %s' %
                        (searchradius, racenter, declcenter, len(kdtindices)))

                # update the conesearch_matching_index
                matchingind = kdtindices
                conesearch_matching_index[np.array(matchingind)] = True

            # we fail immediately if we found nothing. this assumes the user
            # cares more about the cone-search than the regular column filters
            else:

                LOGERROR("cone-search: no objects were found within "
                         "%.4f deg of (%.3f, %.3f): %s, can't continue" %
                         (searchradius, racenter, declcenter, len(kdtindices)))
                return None, None

        except Exception:

            LOGEXCEPTION('cone-search: could not run a cone-search, '
                         'is there a kdtree present in %s?' % lc_catalog)
            raise

    # now that we're done with cone-search, do the column filtering
    allfilterinds = []
    if columnfilters and isinstance(columnfilters, list):

        # go through each filter
        for cfilt in columnfilters:

            try:

                fcol, foperator, foperand = cfilt.split('|')
                foperator = FILTEROPS[foperator]

                # generate the evalstring
                # NOTE: this eval() executes the filter spec verbatim; only
                # pass trusted `columnfilters` strings to this function
                filterstr = (
                    "np.isfinite(lclist['objects']['%s']) & "
                    "(lclist['objects']['%s'] %s %s)"
                ) % (fcol, fcol, foperator, foperand)
                filterind = eval(filterstr)

                ngood = lclist['objects'][objectidcol][filterind].size
                LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood))

                allfilterinds.append(filterind)

            except Exception:

                LOGEXCEPTION('filter: could not understand filter spec: %s'
                             % cfilt)
                LOGWARNING('filter: not applying this broken filter')

    # now that we have all the filter indices good to go
    # logical-AND all the things

    # make sure we only do filtering if we were told to do so
    if (xmatchexternal or conesearch or columnfilters):

        filterstack = []
        if xmatchexternal:
            filterstack.append(xmatch_matching_index)
        if conesearch:
            filterstack.append(conesearch_matching_index)
        if columnfilters:
            filterstack.extend(allfilterinds)

        finalfilterind = np.column_stack(filterstack)
        finalfilterind = np.all(finalfilterind, axis=1)

        # get the filtered object light curves and object names
        filteredobjectids = lclist['objects'][objectidcol][finalfilterind]
        filteredlcfnames = lclist['objects']['lcfname'][finalfilterind]

    else:

        # FIX: no filters at all means everything matches. we still define a
        # boolean index because the finder-chart branch below uses it to pick
        # out the matching coordinates (previously this path left
        # `finalfilterind` undefined, raising a NameError if field_fitsfile
        # was provided without any filters)
        finalfilterind = np.ones(lclist['objects'][objectidcol].shape,
                                 dtype=bool)
        filteredobjectids = lclist['objects'][objectidcol]
        filteredlcfnames = lclist['objects']['lcfname']

    # if we're told to make a finder chart with the selected objects
    if field_fitsfile is not None and os.path.exists(field_fitsfile):

        # get the RA and DEC of the matching objects
        matching_ra = lclist['objects'][racol][finalfilterind]
        matching_decl = lclist['objects'][declcol][finalfilterind]

        matching_postfix = []

        if xmatchexternal is not None:
            matching_postfix.append(
                'xmatch_%s' %
                os.path.splitext(os.path.basename(xmatchexternal))[0]
            )
        if conesearch is not None:
            matching_postfix.append('conesearch_RA%.3f_DEC%.3f_RAD%.5f' %
                                    tuple(conesearch))

        if columnfilters is not None:
            for cfi, cf in enumerate(columnfilters):
                if cfi == 0:
                    matching_postfix.append('filter_%s_%s_%s' %
                                            tuple(cf.split('|')))
                else:
                    matching_postfix.append('_and_%s_%s_%s' %
                                            tuple(cf.split('|')))

        if len(matching_postfix) > 0:
            matching_postfix = '-%s' % '_'.join(matching_postfix)
        else:
            matching_postfix = ''

        # next, we'll make a PNG plot for the finder
        finder_outfile = os.path.join(
            os.path.dirname(lc_catalog),
            '%s%s.png' %
            (os.path.splitext(os.path.basename(lc_catalog))[0],
             matching_postfix)
        )

        # FIX: pass the zoom setting via `overlay_zoomcontain` -- the same
        # kwarg name used by the fits_finder_chart call in make_lclist above;
        # the previous `field_zoomcontain=` kwarg name was inconsistent
        finder_png = fits_finder_chart(
            field_fitsfile,
            finder_outfile,
            wcsfrom=field_wcsfrom,
            scale=field_scale,
            stretch=field_stretch,
            colormap=field_colormap,
            findersize=field_findersize,
            overlay_ra=matching_ra,
            overlay_decl=matching_decl,
            overlay_pltopts=field_pltopts,
            overlay_zoomcontain=field_zoomcontain,
            grid=field_grid,
            gridcolor=field_gridcolor
        )

        if finder_png is not None:
            LOGINFO('generated a finder PNG '
                    'with an object position overlay '
                    'for this filtered LC list: %s' % finder_png)

    # if copylcsto is not None, copy LCs over to it
    if copylcsto is not None:

        if not os.path.exists(copylcsto):
            os.mkdir(copylcsto)

        if TQDM:
            lciter = tqdm(filteredlcfnames)
        else:
            lciter = filteredlcfnames

        LOGINFO('copying matching light curves to %s' % copylcsto)

        for lc in lciter:
            shutil.copy(lc, copylcsto)

    LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size)

    if xmatchexternal and len(ext_matching_objects) > 0:
        return filteredlcfnames, filteredobjectids, ext_matching_objects
    else:
        return filteredlcfnames, filteredobjectids
############################################################
## ADDING CHECKPLOT INFO BACK TO THE LIGHT CURVE CATALOGS ##
############################################################
def _cpinfo_key_worker(task):
    '''This wraps `checkplotlist.checkplot_infokey_worker`.

    This is used to get the correct dtype for each element in retrieved
    results.

    Parameters
    ----------
    task : tuple
        task[0] = cpfile
        task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)

    Returns
    -------
    list
        The values of all requested keys from the checkplot, in the same
        order as `keyspeclist`, with the checkplot's `objectid` prepended as
        the first element (used later to match against the object catalog).
        None/nan values are replaced by their configured substitutes, and
        list/tuple values are flattened to comma-separated strings.

    '''

    cpfile, keyspeclist = task

    keystoget = [x[0] for x in keyspeclist]
    nonesubs = [x[-2] for x in keyspeclist]
    nansubs = [x[-1] for x in keyspeclist]

    # reform the keystoget into a list of lists; decimal path components are
    # list indices (e.g. 'objectinfo.gaia_ids.0' -> ['objectinfo',
    # 'gaia_ids', 0])
    for i, k in enumerate(keystoget):
        keystoget[i] = [(int(x) if x.isdecimal() else x)
                        for x in k.split('.')]

    # add in the objectid as well to match to the object catalog later
    keystoget.insert(0, ['objectid'])
    nonesubs.insert(0, '')
    nansubs.insert(0, '')

    # get all the keys we need
    vals = checkplot_infokey_worker((cpfile, keystoget))

    # if they have some Nones, nans, etc., reform them as expected
    for valind, (val, nonesub, nansub) in enumerate(zip(vals, nonesubs,
                                                        nansubs)):
        if val is None:
            outval = nonesub
        elif isinstance(val, float) and not np.isfinite(val):
            outval = nansub
        elif isinstance(val, (list, tuple)):
            # FIX: coerce the elements to str first so sequences of
            # non-string values (e.g. lists of IDs or floats) don't raise
            # TypeError in str.join
            outval = ', '.join(str(x) for x in val)
        else:
            outval = val

        vals[valind] = outval

    return vals
def _build_cpinfo_defaultkeys():
    """Build the default checkplot-info key specification list.

    Each entry is a six-element tuple:
    (key, dtype, first level, overwrite=T|append=F, None sub, nan sub)
    """

    # (dtype, None-substitute, nan-substitute) triples per column flavor
    strcol = (np.unicode_, '', '')
    floatcol = (np.float_, np.nan, np.nan)
    intcol = (np.int_, 0, 0)

    def _spec(key, coltype, firstlevel):
        # all default keys overwrite existing columns (4th element is True)
        dtype, nonesub, nansub = coltype
        return (key, dtype, firstlevel, True, nonesub, nansub)

    specs = [
        _spec('comments', strcol, False),
        _spec('objectinfo.objecttags', strcol, True),
        _spec('objectinfo.twomassid', strcol, True),
    ]

    # magnitudes: observed, dereddened, and extinction values for each band
    bands = ('bmag', 'vmag', 'rmag', 'imag', 'jmag', 'hmag', 'kmag',
             'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz')
    for prefix in ('', 'dered_', 'extinction_'):
        specs.extend(_spec('objectinfo.%s%s' % (prefix, band),
                           floatcol, True)
                     for band in bands)

    specs.append(_spec('objectinfo.color_classes', strcol, True))

    # proper-motion and galactic-coordinate info
    for key in ('pmra', 'pmdecl', 'propermotion', 'rpmj', 'gl', 'gb'):
        specs.append(_spec('objectinfo.%s' % key, floatcol, True))

    # GAIA info
    specs.append(_spec('objectinfo.gaia_status', strcol, True))
    specs.append(_spec('objectinfo.gaia_ids.0', strcol, True))
    for key in ('gaiamag', 'gaia_parallax', 'gaia_parallax_err',
                'gaia_absmag'):
        specs.append(_spec('objectinfo.%s' % key, floatcol, True))

    # SIMBAD info
    for key in ('simbad_best_mainid', 'simbad_best_objtype',
                'simbad_best_allids'):
        specs.append(_spec('objectinfo.%s' % key, strcol, True))
    specs.append(_spec('objectinfo.simbad_best_distarcsec', floatcol, True))

    #
    # TIC info
    #
    specs.append(_spec('objectinfo.ticid', strcol, True))
    specs.append(_spec('objectinfo.tic_version', strcol, True))
    specs.append(_spec('objectinfo.tessmag', floatcol, True))

    #
    # variability info (these are per-magcol, i.e. not first level)
    #
    specs.append(_spec('varinfo.vartags', strcol, False))
    specs.append(_spec('varinfo.varperiod', floatcol, False))
    specs.append(_spec('varinfo.varepoch', floatcol, False))
    specs.append(_spec('varinfo.varisperiodic', intcol, False))
    specs.append(_spec('varinfo.objectisvar', intcol, False))
    for feat in ('median', 'mad', 'stdev', 'mag_iqr', 'skew', 'kurtosis',
                 'stetsonj', 'stetsonk', 'eta_normal', 'linear_fit_slope',
                 'magnitude_ratio', 'beyond1std'):
        specs.append(_spec('varinfo.features.%s' % feat, floatcol, False))

    return specs


# key, dtype, first level, overwrite=T|append=F, None sub, nan sub
CPINFO_DEFAULTKEYS = _build_cpinfo_defaultkeys()
def add_cpinfo_to_lclist(
        checkplots,  # list or a directory path
        initial_lc_catalog,
        magcol,  # to indicate checkplot magcol
        outfile,
        checkplotglob='checkplot*.pkl*',
        infokeys=CPINFO_DEFAULTKEYS,
        nworkers=NCPUS
):
    '''This adds checkplot info to the initial light curve catalogs generated
    by `make_lclist`.

    This is used to incorporate all the extra info checkplots can have for
    objects back into columns in the light curve catalog produced by
    `make_lclist`. Objects are matched between the checkplots and the light
    curve catalog using their `objectid`. This then allows one to search this
    'augmented' light curve catalog by these extra columns. The 'augmented'
    light curve catalog also forms the basis for search interface provided by
    the LCC-Server.

    The default list of keys that will be extracted from a checkplot and added
    as columns in the initial light curve catalog is listed above in the
    `CPINFO_DEFAULTKEYS` list.

    Parameters
    ----------
    checkplots : str or list
        If this is a str, is interpreted as a directory which will be searched
        for checkplot pickle files using `checkplotglob`. If this is a list, it
        will be interpreted as a list of checkplot pickle files to process.

    initial_lc_catalog : str
        This is the path to the light curve catalog pickle made by
        `make_lclist`.

    magcol : str
        This is used to indicate the light curve magnitude column to extract
        magnitude column specific information. For example, Stetson variability
        indices can be generated using magnitude measurements in separate
        photometric apertures, which appear in separate `magcols` in the
        checkplot. To associate each such feature of the object with its
        specific `magcol`, pass that `magcol` in here. This `magcol` will then
        be added as a prefix to the resulting column in the 'augmented' LC
        catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and
        `magcol2_stetsonj` for two separate magcols.

    outfile : str
        This is the file name of the output 'augmented' light curve catalog
        pickle file that will be written.

    infokeys : list of tuples
        This is a list of keys to extract from the checkplot and some info on
        how this extraction is to be done. Each key entry is a six-element
        tuple of the following form:

        - key name in the checkplot
        - numpy dtype of the value of this key
        - False if key is associated with a magcol or True otherwise
        - False if subsequent updates to the same column name will append to
          existing key values in the output augmented light curve catalog or
          True if these will overwrite the existing key value
        - character to use to substitute a None value of the key in the
          checkplot in the output light curve catalog column
        - character to use to substitute a nan value of the key in the
          checkplot in the output light curve catalog column

        See the `CPINFO_DEFAULTKEYS` list above for examples.

    nworkers : int
        The number of parallel workers to launch to extract checkplot
        information.

    Returns
    -------
    str
        Returns the path to the generated 'augmented' light curve catalog
        pickle file.

    '''

    # get the checkplots from the directory if one is provided
    # NOTE(review): if `checkplots` is a string path that does NOT exist, it
    # falls through unchanged and the task list below will iterate over its
    # characters -- confirm callers always pass an existing dir or a list
    if not isinstance(checkplots, list) and os.path.exists(checkplots):
        checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))

    # one task per checkplot pickle; each worker returns
    # [objectid, val1, val2, ...] in infokeys order
    tasklist = [(cpf, infokeys) for cpf in checkplots]

    with ProcessPoolExecutor(max_workers=nworkers) as executor:
        resultfutures = executor.map(_cpinfo_key_worker, tasklist)

    results = list(resultfutures)
    executor.shutdown()

    # now that we have all the checkplot info, we need to match to the
    # objectlist in the lclist

    # open the lclist
    with open(initial_lc_catalog,'rb') as infd:
        lc_catalog = pickle.load(infd)

    # convert the lc_catalog['columns'] item to a list if it's not
    # this is so we can append columns to it later
    lc_catalog['columns'] = list(lc_catalog['columns'])

    catalog_objectids = np.array(lc_catalog['objects']['objectid'])
    # the first element of each worker result is the checkplot's objectid
    checkplot_objectids = np.array([x[0] for x in results])

    # add the extra key arrays in the lclist dict
    extrainfokeys = []
    actualkeys = []

    # set up the extrainfokeys list
    for keyspec in infokeys:

        key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspec

        # non-first-level keys are per-magcol, so prefix them with the magcol
        if firstlevel:
            eik = key
        else:
            eik = '%s.%s' % (magcol, key)

        extrainfokeys.append(eik)

        # now handle the output dicts and column list
        eactual = eik.split('.')

        # this handles dereferenced list indices
        if not eactual[-1].isdigit():

            if not firstlevel:
                eactual = '.'.join([eactual[0], eactual[-1]])
            else:
                eactual = eactual[-1]

        else:
            # the key path ends in a numeric list index (e.g. 'gaia_ids.0'),
            # so the column is named after the containing list, singularized
            elastkey = eactual[-2]

            # for list columns, this converts stuff like errs -> err,
            # and parallaxes -> parallax
            if elastkey.endswith('es'):
                elastkey = elastkey[:-2]
            elif elastkey.endswith('s'):
                elastkey = elastkey[:-1]

            if not firstlevel:
                eactual = '.'.join([eactual[0], elastkey])
            else:
                eactual = elastkey

        actualkeys.append(eactual)

        # add a new column only if required
        if eactual not in lc_catalog['columns']:
            lc_catalog['columns'].append(eactual)

        # we'll overwrite earlier existing columns in any case
        lc_catalog['objects'][eactual] = []

    # now go through each objectid in the catalog and add the extra keys to
    # their respective arrays
    for catobj in tqdm(catalog_objectids):

        cp_objind = np.where(checkplot_objectids == catobj)

        if len(cp_objind[0]) > 0:

            # get the info line for this checkplot
            thiscpinfo = results[cp_objind[0][0]]

            # the first element is the objectid which we remove
            thiscpinfo = thiscpinfo[1:]

            # update the object catalog entries for this object
            for ekind, ek in enumerate(actualkeys):
                # add the actual thing to the output list
                lc_catalog['objects'][ek].append(
                    thiscpinfo[ekind]
                )

        else:

            # no checkplot for this object: fill every column with its
            # None-substitute so all column arrays stay the same length
            # update the object catalog entries for this object
            for ekind, ek in enumerate(actualkeys):
                thiskeyspec = infokeys[ekind]
                nonesub = thiskeyspec[-2]
                lc_catalog['objects'][ek].append(
                    nonesub
                )

    # now we should have all the new keys in the object catalog
    # turn them into arrays
    for ek in actualkeys:
        lc_catalog['objects'][ek] = np.array(
            lc_catalog['objects'][ek]
        )

    # add the magcol to the lc_catalog
    if 'magcols' in lc_catalog:
        if magcol not in lc_catalog['magcols']:
            lc_catalog['magcols'].append(magcol)
    else:
        lc_catalog['magcols'] = [magcol]

    # write back the new object catalog
    with open(outfile, 'wb') as outfd:
        pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)

    return outfile
| mit |
rseubert/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
    """Time lasso path computations over a grid of problem sizes.

    Parameters
    ----------
    samples_range, features_range : sequences of int
        Values of n_samples and n_features to benchmark over (full grid).

    Returns
    -------
    collections.defaultdict
        Maps a method label (e.g. 'lars_path (with Gram)') to the list of
        runtimes in seconds, one per (n_samples, n_features) combination.
    """

    it = 0
    results = defaultdict(lambda: [])

    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # FIX: make_regression requires integer counts; use floor
            # division so this also works under Python 3's true division
            # (n_features / 10 would be a float there)
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            # collect before each timed section so GC pauses don't skew
            # the measurements
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the equivalent dtype argument
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
    results = compute_bench(samples_range, features_range)

    max_time = max(max(t) for t in results.values())
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        #ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        #ax.legend()
        i += 1
    plt.show()
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/measure_resolution/lmfit/model.py | 7 | 40402 | """
Concise nonlinear curve fitting.
"""
from __future__ import print_function
import warnings
import inspect
import operator
from copy import deepcopy
import numpy as np
from . import Parameters, Parameter, Minimizer
from .printfuncs import fit_report
from collections import MutableSet
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class OrderedSet(MutableSet):
    """A set that remembers insertion order.

    Based on http://code.activestate.com/recipes/576694-orderedset/.
    Elements live in a circular doubly-linked list anchored at a sentinel
    node, with `self.map` mapping each element to its list node so that
    membership tests, insertion, and removal are all O(1).
    """

    def __init__(self, iterable=None):
        # sentinel node [value, prev, next]; links to itself when empty
        end = []
        end += [None, end, end]
        self.end = end
        self.map = {}  # element --> [element, prev_node, next_node]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        """Append `key` at the end unless it is already present."""
        if key in self.map:
            return
        sentinel = self.end
        tail = sentinel[1]
        node = [key, tail, sentinel]
        tail[2] = sentinel[1] = self.map[key] = node

    def discard(self, key):
        """Remove `key` if present, relinking its neighbours."""
        node = self.map.pop(key, None)
        if node is not None:
            _, before, after = node
            before[2] = after
            after[1] = before

    def __iter__(self):
        node = self.end[2]
        while node is not self.end:
            yield node[0]
            node = node[2]

    def __reversed__(self):
        node = self.end[1]
        while node is not self.end:
            yield node[0]
            node = node[1]

    def pop(self, last=True):
        """Remove and return the last (or first, if `last` is False) element."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        clsname = self.__class__.__name__
        if not self:
            return '%s()' % (clsname,)
        return '%s(%r)' % (clsname, list(self))

    def __eq__(self, other):
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
# Use pandas.isnull for aligning missing data is pandas is available.
# otherwise use numpy.isnan
try:
from pandas import isnull, Series
except ImportError:
isnull = np.isnan
Series = type(NotImplemented)
def _align(var, mask, data):
"align missing data, with pandas is available"
if isinstance(data, Series) and isinstance(var, Series):
return var.reindex_like(data).dropna()
elif mask is not None:
return var[mask]
return var
try:
from matplotlib import pyplot as plt
_HAS_MATPLOTLIB = True
except ImportError:
_HAS_MATPLOTLIB = False
def _ensureMatplotlib(function):
    """Decorator guard for plotting helpers.

    Passes `function` through untouched when matplotlib imported
    successfully; otherwise substitutes a stub that just prints a
    message instead of plotting.
    """
    if not _HAS_MATPLOTLIB:
        def no_op(*args, **kwargs):
            print('matplotlib module is required for plotting the results')
        return no_op
    return function
class Model(object):
"""Create a model from a user-defined function.
Parameters
----------
func: function to be wrapped
independent_vars: list of strings or None (default)
arguments to func that are independent variables
param_names: list of strings or None (default)
names of arguments to func that are to be made into parameters
missing: None, 'none', 'drop', or 'raise'
'none' or None: Do not check for null or missing values (default)
'drop': Drop null or missing observations in data.
if pandas is installed, pandas.isnull is used, otherwise
numpy.isnan is used.
'raise': Raise a (more helpful) exception when data contains null
or missing values.
name: None or string
name for the model. When `None` (default) the name is the same as
the model function (`func`).
Note
----
Parameter names are inferred from the function arguments,
and a residual function is automatically constructed.
Example
-------
>>> def decay(t, tau, N):
... return N*np.exp(-t/tau)
...
>>> my_model = Model(decay, independent_vars=['t'])
"""
_forbidden_args = ('data', 'weights', 'params')
_invalid_ivar = "Invalid independent variable name ('%s') for function %s"
_invalid_par = "Invalid parameter name ('%s') for function %s"
_invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
_valid_missing = (None, 'none', 'drop', 'raise')
_invalid_hint = "unknown parameter hint '%s' for param '%s'"
_hint_names = ('value', 'vary', 'min', 'max', 'expr')
def __init__(self, func, independent_vars=None, param_names=None,
missing='none', prefix='', name=None, **kws):
self.func = func
self._prefix = prefix
self._param_root_names = param_names # will not include prefixes
self.independent_vars = independent_vars
self._func_allargs = []
self._func_haskeywords = False
if not missing in self._valid_missing:
raise ValueError(self._invalid_missing)
self.missing = missing
self.opts = kws
self.param_hints = OrderedDict()
self._param_names = OrderedSet()
self._parse_params()
if self.independent_vars is None:
self.independent_vars = []
if name is None and hasattr(self.func, '__name__'):
name = self.func.__name__
self._name = name
def _reprstring(self, long=False):
out = self._name
opts = []
if len(self._prefix) > 0:
opts.append("prefix='%s'" % (self._prefix))
if long:
for k, v in self.opts.items():
opts.append("%s='%s'" % (k, v))
if len(opts) > 0:
out = "%s, %s" % (out, ', '.join(opts))
return "Model(%s)" % out
@property
def name(self):
return self._reprstring(long=False)
@name.setter
def name(self, value):
self._name = value
@property
def prefix(self):
return self._prefix
@prefix.setter
def prefix(self, value):
self._prefix = value
self._parse_params()
@property
def param_names(self):
return self._param_names
def __repr__(self):
return "<lmfit.Model: %s>" % (self.name)
def copy(self, prefix=None):
"""Return a completely independent copy of the whole model.
Parameters
----------
prefix: string or None. If not None new model's prefix is
changed to the passed value.
"""
new = deepcopy(self)
if prefix is not None:
new.prefix = prefix
return new
def _parse_params(self):
"build params from function arguments"
if self.func is None:
return
argspec = inspect.getargspec(self.func)
pos_args = argspec.args[:]
keywords = argspec.keywords
kw_args = {}
if argspec.defaults is not None:
for val in reversed(argspec.defaults):
kw_args[pos_args.pop()] = val
#
self._func_haskeywords = keywords is not None
self._func_allargs = pos_args + list(kw_args.keys())
allargs = self._func_allargs
if len(allargs) == 0 and keywords is not None:
return
# default independent_var = 1st argument
if self.independent_vars is None:
self.independent_vars = [pos_args[0]]
# default param names: all positional args
# except independent variables
self.def_vals = {}
might_be_param = []
if self._param_root_names is None:
self._param_root_names = pos_args[:]
for key, val in kw_args.items():
if (not isinstance(val, bool) and
isinstance(val, (float, int))):
self._param_root_names.append(key)
self.def_vals[key] = val
elif val is None:
might_be_param.append(key)
for p in self.independent_vars:
if p in self._param_root_names:
self._param_root_names.remove(p)
new_opts = {}
for opt, val in self.opts.items():
if (opt in self._param_root_names or opt in might_be_param and
isinstance(val, Parameter)):
self.set_param_hint(opt, value=val.value,
min=val.min, max=val.max, expr=val.expr)
elif opt in self._func_allargs:
new_opts[opt] = val
self.opts = new_opts
names = []
if self._prefix is None:
self._prefix = ''
for pname in self._param_root_names:
names.append("%s%s" % (self._prefix, pname))
# check variables names for validity
# The implicit magic in fit() requires us to disallow some
fname = self.func.__name__
for arg in self.independent_vars:
if arg not in allargs or arg in self._forbidden_args:
raise ValueError(self._invalid_ivar % (arg, fname))
for arg in names:
if (self._strip_prefix(arg) not in allargs or
arg in self._forbidden_args):
raise ValueError(self._invalid_par % (arg, fname))
self._param_names = OrderedSet(names)
def set_param_hint(self, name, **kwargs):
"""set hints for parameter, including optional bounds
and constraints (value, vary, min, max, expr)
these will be used by make_params() when building
default parameters
example:
model = GaussianModel()
model.set_param_hint('amplitude', min=-100.0, max=0.)
"""
npref = len(self._prefix)
if npref > 0 and name.startswith(self._prefix):
name = name[npref:]
if name not in self.param_hints:
self.param_hints[name] = OrderedDict()
hints = self.param_hints[name]
for key, val in kwargs.items():
if key in self._hint_names:
hints[key] = val
else:
warnings.warn(self._invalid_hint % (key, name))
def make_params(self, **kwargs):
"""create and return a Parameters object for a Model.
This applies any default values
"""
verbose = False
if 'verbose' in kwargs:
verbose = kwargs['verbose']
params = Parameters()
for name in self.param_names:
par = Parameter(name=name)
basename = name[len(self._prefix):]
# apply defaults from model function definition
if basename in self.def_vals:
par.value = self.def_vals[basename]
# apply defaults from parameter hints
if basename in self.param_hints:
hint = self.param_hints[basename]
for item in self._hint_names:
if item in hint:
setattr(par, item, hint[item])
# apply values passed in through kw args
if basename in kwargs:
# kw parameter names with no prefix
par.value = kwargs[basename]
if name in kwargs:
# kw parameter names with prefix
par.value = kwargs[name]
params[name] = par
# add any additional parameters defined in param_hints
# note that composites may define their own additional
# convenience parameters here
for basename, hint in self.param_hints.items():
name = "%s%s" % (self._prefix, basename)
if name not in params:
par = params[name] = Parameter(name=name)
for item in self._hint_names:
if item in hint:
setattr(par, item, hint[item])
# Add the new parameter to the self.param_names
self._param_names.add(name)
if verbose: print( ' - Adding parameter "%s"' % name)
return params
def guess(self, data=None, **kws):
"""stub for guess starting values --
should be implemented for each model subclass to
run self.make_params(), update starting values
and return a Parameters object"""
cname = self.__class__.__name__
msg = 'guess() not implemented for %s' % cname
raise NotImplementedError(msg)
def _residual(self, params, data, weights, **kwargs):
"default residual: (data-model)*weights"
diff = self.eval(params, **kwargs) - data
if weights is not None:
diff *= weights
return np.asarray(diff).ravel() # for compatibility with pandas.Series
def _handle_missing(self, data):
"handle missing data"
if self.missing == 'raise':
if np.any(isnull(data)):
raise ValueError("Data contains a null value.")
elif self.missing == 'drop':
mask = ~isnull(data)
if np.all(mask):
return None # short-circuit this -- no missing values
mask = np.asarray(mask) # for compatibility with pandas.Series
return mask
def _strip_prefix(self, name):
npref = len(self._prefix)
if npref > 0 and name.startswith(self._prefix):
name = name[npref:]
return name
def make_funcargs(self, params=None, kwargs=None, strip=True):
"""convert parameter values and keywords to function arguments"""
if params is None: params = {}
if kwargs is None: kwargs = {}
out = {}
out.update(self.opts)
for name, par in params.items():
if strip:
name = self._strip_prefix(name)
if name in self._func_allargs or self._func_haskeywords:
out[name] = par.value
# kwargs handled slightly differently -- may set param value too!
for name, val in kwargs.items():
if strip:
name = self._strip_prefix(name)
if name in self._func_allargs or self._func_haskeywords:
out[name] = val
if name in params:
params[name].value = val
return out
def _make_all_args(self, params=None, **kwargs):
"""generate **all** function args for all functions"""
args = {}
for key, val in self.make_funcargs(params, kwargs).items():
args["%s%s" % (self._prefix, key)] = val
return args
def eval(self, params=None, **kwargs):
"""evaluate the model with the supplied parameters"""
result = self.func(**self.make_funcargs(params, kwargs))
# Handle special case of constant result and one
# independent variable (of any dimension).
if np.ndim(result) == 0 and len(self.independent_vars) == 1:
result = np.tile(result, kwargs[self.independent_vars[0]].shape)
return result
@property
def components(self):
"""return components for composite model"""
return [self]
def eval_components(self, params=None, **kwargs):
"""
evaluate the model with the supplied parameters and returns a ordered
dict containting name, result pairs.
"""
key = self._prefix
if len(key) < 1:
key = self._name
return {key: self.eval(params=params, **kwargs)}
def fit(self, data, params=None, weights=None, method='leastsq',
iter_cb=None, scale_covar=True, verbose=True, fit_kws=None, **kwargs):
"""Fit the model to the data.
Parameters
----------
data: array-like
params: Parameters object
weights: array-like of same size as data
used for weighted fit
method: fitting method to use (default = 'leastsq')
iter_cb: None or callable callback function to call at each iteration.
scale_covar: bool (default True) whether to auto-scale covariance matrix
verbose: bool (default True) print a message when a new parameter is
added because of a hint.
fit_kws: dict
default fitting options, such as xtol and maxfev, for scipy optimizer
keyword arguments: optional, named like the arguments of the
model function, will override params. See examples below.
Returns
-------
lmfit.ModelResult
Examples
--------
# Take t to be the independent variable and data to be the
# curve we will fit.
# Using keyword arguments to set initial guesses
>>> result = my_model.fit(data, tau=5, N=3, t=t)
# Or, for more control, pass a Parameters object.
>>> result = my_model.fit(data, params, t=t)
# Keyword arguments override Parameters.
>>> result = my_model.fit(data, params, tau=5, t=t)
Note
----
All parameters, however passed, are copied on input, so the original
Parameter objects are unchanged.
"""
if params is None:
params = self.make_params(verbose=verbose)
else:
params = deepcopy(params)
# If any kwargs match parameter names, override params.
param_kwargs = set(kwargs.keys()) & set(self.param_names)
for name in param_kwargs:
p = kwargs[name]
if isinstance(p, Parameter):
p.name = name # allows N=Parameter(value=5) with implicit name
params[name] = deepcopy(p)
else:
params[name].set(value=p)
del kwargs[name]
# All remaining kwargs should correspond to independent variables.
for name in kwargs.keys():
if not name in self.independent_vars:
warnings.warn("The keyword argument %s does not" % name +
"match any arguments of the model function." +
"It will be ignored.", UserWarning)
# If any parameter is not initialized raise a more helpful error.
missing_param = any([p not in params.keys()
for p in self.param_names])
blank_param = any([(p.value is None and p.expr is None)
for p in params.values()])
if missing_param or blank_param:
msg = ('Assign each parameter an initial value by passing '
'Parameters or keyword arguments to fit.\n')
missing = [p for p in self.param_names if p not in params.keys()]
blank = [name for name, p in params.items()
if (p.value is None and p.expr is None)]
msg += 'Missing parameters: %s\n' % str(missing)
msg += 'Non initialized parameters: %s' % str(blank)
raise ValueError(msg)
# Do not alter anything that implements the array interface (np.array, pd.Series)
# but convert other iterables (e.g., Python lists) to numpy arrays.
if not hasattr(data, '__array__'):
data = np.asfarray(data)
for var in self.independent_vars:
var_data = kwargs[var]
if (not hasattr(var_data, '__array__')) and (not np.isscalar(var_data)):
kwargs[var] = np.asfarray(var_data)
# Handle null/missing values.
mask = None
if self.missing not in (None, 'none'):
mask = self._handle_missing(data) # This can raise.
if mask is not None:
data = data[mask]
if weights is not None:
weights = _align(weights, mask, data)
# If independent_vars and data are alignable (pandas), align them,
# and apply the mask from above if there is one.
for var in self.independent_vars:
if not np.isscalar(kwargs[var]):
kwargs[var] = _align(kwargs[var], mask, data)
if fit_kws is None:
fit_kws = {}
output = ModelResult(self, params, method=method, iter_cb=iter_cb,
scale_covar=scale_covar, fcn_kws=kwargs,
**fit_kws)
output.fit(data=data, weights=weights)
output.components = self.components
return output
def __add__(self, other):
return CompositeModel(self, other, operator.add)
def __sub__(self, other):
return CompositeModel(self, other, operator.sub)
def __mul__(self, other):
return CompositeModel(self, other, operator.mul)
def __div__(self, other):
return CompositeModel(self, other, operator.truediv)
def __truediv__(self, other):
    """Return a CompositeModel combining this model and *other* with '/'."""
    combined = CompositeModel(self, other, operator.truediv)
    return combined
class CompositeModel(Model):
    """Create a composite model -- a binary operator of two Models

    Parameters
    ----------
    left_model: left-hand side model -- must be a Model()
    right_model: right-hand side model -- must be a Model()
    oper: callable binary operator (typically, operator.add, operator.mul, etc)
    independent_vars: list of strings or None (default)
        arguments to func that are independent variables
    param_names: list of strings or None (default)
        names of arguments to func that are to be made into parameters
    missing: None, 'none', 'drop', or 'raise'
        'none' or None: Do not check for null or missing values (default)
        'drop': Drop null or missing observations in data.
            if pandas is installed, pandas.isnull is used, otherwise
            numpy.isnan is used.
        'raise': Raise a (more helpful) exception when data contains null
            or missing values.
    name: None or string
        name for the model. When `None` (default) the name is the same as
        the model function (`func`).
    """
    # Error-message templates used by __init__ validation below.
    _names_collide = ("\nTwo models have parameters named '{clash}'. "
                      "Use distinct names.")
    _bad_arg = "CompositeModel: argument {arg} is not a Model"
    _bad_op = "CompositeModel: operator {op} is not callable"
    # Operators rendered symbolically by _reprstring(); others fall back
    # to the operator object itself.
    _known_ops = {operator.add: '+', operator.sub: '-',
                  operator.mul: '*', operator.truediv: '/'}

    def __init__(self, left, right, op, **kws):
        # Validate both operands and the operator before storing them.
        if not isinstance(left, Model):
            raise ValueError(self._bad_arg.format(arg=left))
        if not isinstance(right, Model):
            raise ValueError(self._bad_arg.format(arg=right))
        if not callable(op):
            raise ValueError(self._bad_op.format(op=op))

        self.left = left
        self.right = right
        self.op = op

        # Parameter names of the two sub-models must be disjoint, so each
        # parameter maps to exactly one sub-model.
        name_collisions = left.param_names & right.param_names
        if len(name_collisions) > 0:
            msg = ''
            for collision in name_collisions:
                msg += self._names_collide.format(clash=collision)
            raise NameError(msg)

        # we assume that all the sub-models have the same independent vars
        if 'independent_vars' not in kws:
            kws['independent_vars'] = self.left.independent_vars
        if 'missing' not in kws:
            kws['missing'] = self.left.missing

        # Placeholder model function: evaluation is delegated to the
        # sub-models in eval(), so this never needs a real body.
        def _tmp(self, *args, **kws): pass
        Model.__init__(self, _tmp, **kws)

        # Merge parameter hints from both sides, namespaced by each
        # side's prefix.
        for side in (left, right):
            prefix = side.prefix
            for basename, hint in side.param_hints.items():
                self.param_hints["%s%s" % (prefix, basename)] = hint

    def _parse_params(self):
        # Combine introspection results of both sub-models; note that
        # left-hand values win on key clashes (update() with left last).
        self._func_haskeywords = (self.left._func_haskeywords or
                                  self.right._func_haskeywords)
        self._func_allargs = (self.left._func_allargs +
                              self.right._func_allargs)
        self.def_vals = deepcopy(self.right.def_vals)
        self.def_vals.update(self.left.def_vals)
        self.opts = deepcopy(self.right.opts)
        self.opts.update(self.left.opts)

    def _reprstring(self, long=False):
        # Render as "(left <op> right)" using the symbolic operator table.
        return "(%s %s %s)" % (self.left._reprstring(long=long),
                               self._known_ops.get(self.op, self.op),
                               self.right._reprstring(long=long))

    def eval(self, params=None, **kwargs):
        # Apply the binary operator to the two sub-model evaluations.
        return self.op(self.left.eval(params=params, **kwargs),
                       self.right.eval(params=params, **kwargs))

    def eval_components(self, **kwargs):
        """return ordered dict of name, results for each component"""
        out = OrderedDict(self.left.eval_components(**kwargs))
        out.update(self.right.eval_components(**kwargs))
        return out

    @property
    def param_names(self):
        # Union of both sub-models' parameter names (disjoint by __init__).
        return self.left.param_names | self.right.param_names

    @property
    def components(self):
        """return components for composite model"""
        return self.left.components + self.right.components

    def _make_all_args(self, params=None, **kwargs):
        """generate **all** function args for all functions"""
        # Right side first so left-hand entries win on key clashes,
        # consistent with _parse_params above.
        out = self.right._make_all_args(params=params, **kwargs)
        out.update(self.left._make_all_args(params=params, **kwargs))
        return out
class ModelResult(Minimizer):
    """Result from Model fit

    Attributes
    -----------
    model         instance of Model -- the model function
    params        instance of Parameters -- the fit parameters
    data          array of data values to compare to model
    weights       array of weights used in fitting
    init_params   copy of params, before being updated by fit()
    init_values   array of parameter values, before being updated by fit()
    init_fit      model evaluated with init_params.
    best_fit      model evaluated with params after being updated by fit()

    Methods:
    --------
    fit(data=None, params=None, weights=None, method=None, **kwargs)
         fit (or re-fit) model with params to data (with weights)
         using supplied method.  The keyword arguments are sent to
         as keyword arguments to the model function.

         all inputs are optional, defaulting to the value used in
         the previous fit.  This allows easily changing data or
         parameter settings, or both.

    eval(**kwargs)
         evaluate the current model, with the current parameter values,
         with values in kwargs sent to the model function.

    eval_components(**kwargs)
         evaluate the current model, with the current parameter values,
         with values in kwargs sent to the model function and returns
         a ordered dict with the model names as the key and the component
         results as the values.

    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
         return a fit report.

    plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
             numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
             ax_kws=None)
         Plot the fit results using matplotlib.

    plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
                   ax_kws=None)
         Plot the fit residuals using matplotlib.

    plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
         data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
         ax_fit_kws=None, fig_kws=None)
         Plot the fit results and residuals using matplotlib.
    """
    def __init__(self, model, params, data=None, weights=None,
                 method='leastsq', fcn_args=None, fcn_kws=None,
                 iter_cb=None, scale_covar=True, **fit_kws):
        self.model = model
        self.data = data
        self.weights = weights
        self.method = method
        # Keep an untouched copy of the starting parameters so fit() can
        # later compute init_fit/init_values from them.
        self.init_params = deepcopy(params)
        # The Minimizer base drives the actual fit through the model's
        # residual function.
        Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args,
                           fcn_kws=fcn_kws, iter_cb=iter_cb,
                           scale_covar=scale_covar, **fit_kws)
def fit(self, data=None, params=None, weights=None, method=None, **kwargs):
    """perform fit for a Model, given data and params

    All arguments are optional; any omitted default to the values used
    in the previous fit.  Extra keyword arguments are forwarded to the
    model function (typically independent-variable values).
    """
    # Update only what the caller supplied; everything else is re-used.
    if data is not None:
        self.data = data
    if params is not None:
        self.init_params = params
    if weights is not None:
        self.weights = weights
    if method is not None:
        self.method = method
    self.userargs = (self.data, self.weights)
    self.userkws.update(kwargs)
    # Model evaluated at the starting parameter values.
    self.init_fit = self.model.eval(params=self.params, **self.userkws)
    _ret = self.minimize(method=self.method)
    # Copy all public attributes of the minimizer result onto this
    # object so callers can read e.g. chisqr, params directly.
    for attr in dir(_ret):
        if not attr.startswith('_') :
            setattr(self, attr, getattr(_ret, attr))
    self.init_values = self.model._make_all_args(self.init_params)
    self.best_values = self.model._make_all_args(_ret.params)
    # Model evaluated at the best-fit parameter values.
    self.best_fit = self.model.eval(params=_ret.params, **self.userkws)
def eval(self, **kwargs):
    """Evaluate the model with the current parameters.

    Keyword arguments are merged into (and persist in) the saved
    independent-variable values from the last fit.
    """
    self.userkws.update(kwargs)
    return self.model.eval(params=self.params, **self.userkws)
def eval_components(self, **kwargs):
    """Evaluate each model component with the current parameters.

    Returns an ordered dict of component name -> component result;
    keyword arguments are merged into the saved userkws as in eval().
    """
    self.userkws.update(kwargs)
    return self.model.eval_components(params=self.params, **self.userkws)
def fit_report(self, **kwargs):
    "return fit report"
    # Prepend the model's repr to the standard fit_report() output.
    return '[[Model]]\n %s\n%s\n' % (self.model._reprstring(long=True),
                                     fit_report(self, **kwargs))
@_ensureMatplotlib
def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
             numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
             ax_kws=None):
    """Plot the fit results using matplotlib.

    The method will plot results of the fit using matplotlib, including:
    the data points, the initial fit curve and the fitted curve. If the fit
    model included weights, errorbars will also be plotted.

    Parameters
    ----------
    ax : matplotlib.axes.Axes, optional
        The axes to plot on. The default is None, which means use the
        current pyplot axis or create one if there is none.
    datafmt : string, optional
        matplotlib format string for data points
    fitfmt : string, optional
        matplotlib format string for fitted curve
    initfmt : string, optional
        matplotlib format string for initial conditions for the fit
    yerr : ndarray, optional
        array of uncertainties for data array
    numpoints : int, optional
        If provided, the final and initial fit curves are evaluated not
        only at data points, but refined to contain `numpoints` points in
        total.
    data_kws : dictionary, optional
        keyword arguments passed on to the plot function for data points
    fit_kws : dictionary, optional
        keyword arguments passed on to the plot function for fitted curve
    init_kws : dictionary, optional
        keyword arguments passed on to the plot function for the initial
        conditions of the fit
    ax_kws : dictionary, optional
        keyword arguments for a new axis, if there is one being created

    Returns
    -------
    matplotlib.axes.Axes

    Notes
    ----
    For details about plot format strings and keyword arguments see
    documentation of matplotlib.axes.Axes.plot.

    If yerr is specified or if the fit model included weights, then
    matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
    not specified and the fit includes weights, yerr set to 1/self.weights

    If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.

    See Also
    --------
    ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
    ModelResult.plot : Plot the fit results and residuals using matplotlib.
    """
    # Default the per-artist keyword dictionaries to empty dicts.
    if data_kws is None:
        data_kws = {}
    if fit_kws is None:
        fit_kws = {}
    if init_kws is None:
        init_kws = {}
    if ax_kws is None:
        ax_kws = {}
    # Plotting is only meaningful against a single independent variable.
    if len(self.model.independent_vars) == 1:
        independent_var = self.model.independent_vars[0]
    else:
        print('Fit can only be plotted if the model function has one '
              'independent variable.')
        return False
    if not isinstance(ax, plt.Axes):
        ax = plt.gca(**ax_kws)
    x_array = self.userkws[independent_var]
    # make a dense array for x-axis if data is not dense
    if numpoints is not None and len(self.data) < numpoints:
        x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)
    else:
        x_array_dense = x_array
    # Initial-guess curve, then best-fit curve, both on the (possibly
    # densified) x grid.
    ax.plot(x_array_dense, self.model.eval(self.init_params,
            **{independent_var: x_array_dense}), initfmt,
            label='init', **init_kws)
    ax.plot(x_array_dense, self.model.eval(self.params,
            **{independent_var: x_array_dense}), fitfmt,
            label='best-fit', **fit_kws)
    # Fall back to the fit weights for the error bars if no yerr given.
    if yerr is None and self.weights is not None:
        yerr = 1.0/self.weights
    if yerr is not None:
        ax.errorbar(x_array, self.data, yerr=yerr,
                    fmt=datafmt, label='data', **data_kws)
    else:
        ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
    ax.set_title(self.model.name)
    ax.set_xlabel(independent_var)
    ax.set_ylabel('y')
    ax.legend()
    return ax
@_ensureMatplotlib
def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
                   fit_kws=None, ax_kws=None):
    """Plot the fit residuals using matplotlib.

    The method will plot residuals of the fit using matplotlib, including:
    the data points and the fitted curve (as horizontal line). If the fit
    model included weights, errorbars will also be plotted.

    Parameters
    ----------
    ax : matplotlib.axes.Axes, optional
        The axes to plot on. The default is None, which means use the
        current pyplot axis or create one if there is none.
    datafmt : string, optional
        matplotlib format string for data points
    yerr : ndarray, optional
        array of uncertainties for data array
    data_kws : dictionary, optional
        keyword arguments passed on to the plot function for data points
    fit_kws : dictionary, optional
        keyword arguments passed on to the plot function for fitted curve
    ax_kws : dictionary, optional
        keyword arguments for a new axis, if there is one being created

    Returns
    -------
    matplotlib.axes.Axes

    Notes
    ----
    For details about plot format strings and keyword arguments see
    documentation of matplotlib.axes.Axes.plot.

    If yerr is specified or if the fit model included weights, then
    matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
    not specified and the fit includes weights, yerr set to 1/self.weights

    If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.

    See Also
    --------
    ModelResult.plot_fit : Plot the fit results using matplotlib.
    ModelResult.plot : Plot the fit results and residuals using matplotlib.
    """
    # Default the per-artist keyword dictionaries to empty dicts.
    # (A duplicated `if fit_kws is None` check was removed here.)
    if data_kws is None:
        data_kws = {}
    if fit_kws is None:
        fit_kws = {}
    if ax_kws is None:
        ax_kws = {}
    # Plotting is only meaningful against a single independent variable.
    if len(self.model.independent_vars) == 1:
        independent_var = self.model.independent_vars[0]
    else:
        print('Fit can only be plotted if the model function has one '
              'independent variable.')
        return False
    if not isinstance(ax, plt.Axes):
        ax = plt.gca(**ax_kws)
    x_array = self.userkws[independent_var]
    # The fitted curve appears as the zero line in residual space.
    ax.axhline(0, **fit_kws)
    # Fall back to the fit weights for the error bars if no yerr given.
    if yerr is None and self.weights is not None:
        yerr = 1.0/self.weights
    if yerr is not None:
        ax.errorbar(x_array, self.eval() - self.data, yerr=yerr,
                    fmt=datafmt, label='residuals', **data_kws)
    else:
        ax.plot(x_array, self.eval() - self.data, datafmt,
                label='residuals', **data_kws)
    ax.set_title(self.model.name)
    ax.set_ylabel('residuals')
    ax.legend()
    return ax
@_ensureMatplotlib
def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
         numpoints=None, fig=None, data_kws=None, fit_kws=None,
         init_kws=None, ax_res_kws=None, ax_fit_kws=None,
         fig_kws=None):
    """Plot the fit results and residuals using matplotlib.

    The method will produce a matplotlib figure with both results of the
    fit and the residuals plotted. If the fit model included weights,
    errorbars will also be plotted.

    Parameters
    ----------
    datafmt : string, optional
        matplotlib format string for data points
    fitfmt : string, optional
        matplotlib format string for fitted curve
    initfmt : string, optional
        matplotlib format string for initial conditions for the fit
    yerr : ndarray, optional
        array of uncertainties for data array
    numpoints : int, optional
        If provided, the final and initial fit curves are evaluated not
        only at data points, but refined to contain `numpoints` points in
        total.
    fig : matplotlib.figure.Figure, optional
        The figure to plot on. The default is None, which means use the
        current pyplot figure or create one if there is none.
    data_kws : dictionary, optional
        keyword arguments passed on to the plot function for data points
    fit_kws : dictionary, optional
        keyword arguments passed on to the plot function for fitted curve
    init_kws : dictionary, optional
        keyword arguments passed on to the plot function for the initial
        conditions of the fit
    ax_res_kws : dictionary, optional
        keyword arguments for the axes for the residuals plot
    ax_fit_kws : dictionary, optional
        keyword arguments for the axes for the fit plot
    fig_kws : dictionary, optional
        keyword arguments for a new figure, if there is one being created

    Returns
    -------
    matplotlib.figure.Figure

    Notes
    ----
    The method combines ModelResult.plot_fit and ModelResult.plot_residuals.

    If yerr is specified or if the fit model included weights, then
    matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
    not specified and the fit includes weights, yerr set to 1/self.weights

    If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.

    See Also
    --------
    ModelResult.plot_fit : Plot the fit results using matplotlib.
    ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
    """
    # Default the keyword dictionaries to empty dicts.
    if data_kws is None:
        data_kws = {}
    if fit_kws is None:
        fit_kws = {}
    if init_kws is None:
        init_kws = {}
    if ax_res_kws is None:
        ax_res_kws = {}
    if ax_fit_kws is None:
        ax_fit_kws = {}
    if fig_kws is None:
        fig_kws = {}
    # Plotting is only meaningful against a single independent variable.
    if len(self.model.independent_vars) != 1:
        print('Fit can only be plotted if the model function has one '
              'independent variable.')
        return False
    if not isinstance(fig, plt.Figure):
        fig = plt.figure(**fig_kws)
    # Residuals on top (1/5 of the height), fit below, sharing the x-axis.
    gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
    ax_res = fig.add_subplot(gs[0], **ax_res_kws)
    ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
    # Delegate the actual drawing to the two single-axes methods.
    self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
                  initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
                  fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
    self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
                        data_kws=data_kws, fit_kws=fit_kws,
                        ax_kws=ax_res_kws)
    return fig
| apache-2.0 |
camallen/aggregation | experimental/penguins/clusterAnalysis/ibcc_priors.py | 2 | 2589 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import pymongo
import cPickle as pickle
import os
import math
import sys
# Make the clustering-algorithm helpers importable from either machine
# this script is run on (Python 2 script: cPickle, print statements).
if os.path.exists("/home/ggdhines"):
    sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
    sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from clusterCompare import cluster_compare
if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"
# Load the pre-clustered penguin annotations (pickled on disk).
penguins,temp = pickle.load(open(base_directory+"/Databases/penguins_vote_.pickle","rb"))
#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?
# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponds points
# print penguins[5][0][0][0][0]
#have as a list not a tuple since we need the index
user_set = []
max_users = 20
#first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
for image_index in range(len(penguins[max_users])):
    for penguin_index in range(len(penguins[max_users][image_index][0])):
        users = penguins[max_users][image_index][0][penguin_index][1]
        for u in users:
            if not(u in user_set):
                user_set.append(u)
# Per-user 2x2 confusion matrix: [annotated?][is_penguin?] counts.
confusion_matrix = {u:[[0,0],[0,0]] for u in user_set}
overall_confusion_matrix = [[0,0],[0,0]]
print len(user_set)
#now actually figure out how has annotated a penguin or hasn't
for image_index in range(len(penguins[max_users])):
    for penguin_index in range(len(penguins[max_users][image_index][0])):
        users = penguins[max_users][image_index][0][penguin_index][1]
        # A cluster counts as a "real" penguin when 6 or more users marked it.
        if len(users) >= 6:
            penguin = 1
        else:
            penguin = 0
        for user_index,u_ in enumerate(user_set):
            if u_ in users:
                confusion_matrix[u_][1][penguin] += 1
                overall_confusion_matrix[penguin][1] += 1
            else:
                confusion_matrix[u_][0][penguin] += 1
                overall_confusion_matrix[penguin][0] += 1
# Per-user true-negative / true-positive rates, then report the means
# (usable as IBCC priors).
true_negative = []
true_positive = []
for cm in confusion_matrix.values():
    true_negative.append(cm[0][0]/float(sum(cm[0])))
    true_positive.append(cm[1][1]/float(sum(cm[1])))
print np.mean(true_negative)
print np.mean(true_positive)
| apache-2.0 |
crazyzlj/PyGeoC | setup.py | 1 | 5288 | """A setuptools based setup module for PyGeoC.
PyGeoC is short for "Python for GeoComputation"
Author: Liangjun Zhu
E-mail: zlj@lreis.ac.cn
Blog : zhulj.net
"""
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup
from setuptools.command.test import test as TestCommand
import pygeoc
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# class Tox(TestCommand):
# def finalize_options(self):
# TestCommand.finalize_options(self)
# self.test_args = []
# self.test_suite = True
#
# def run_tests(self):
# # import here, cause outside the eggs aren't loaded
# import tox, sys
# errcode = tox.cmdline(self.test_args)
# sys.exit(errcode)
class PyTest(TestCommand):
    """setuptools command running the test suite with pytest
    (invoked via ``python setup.py test``)."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here so pytest is only required when tests are run.
        import pytest
        import sys
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
# Package metadata and build configuration for PyGeoC.
setup(
    name='PyGeoC',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=pygeoc.__version__,

    description='Python for GeoComputation',
    long_description='Using Python to handle GeoComputation such as hydrologic analysis'
                     'by gridded DEM.',

    # The project's main homepage.
    url=pygeoc.__url__,

    # Author details
    author=pygeoc.__author__,
    author_email=pygeoc.__email__,

    # Choose your license
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   1 - Planning
        #   2 - Pre-Alpha
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],

    # What does your project relate to?
    keywords='GeoComputation utility library',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=['pygeoc'],

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        # In case of incompatibility, users are encouraged to
        # install these required package by themselves.
        # See requirements.txt and requirements_dev.txt for more details.
        # 'gdal>=1.9.0',
        # 'numpy>=1.9.0',
        'matplotlib',
        'typing',
        'future',
        'six'
    ],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={'testing': ['pytest']},

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={},

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
    data_files=[],

    tests_require=['pytest'],
    # cmdclass={'test': Tox},
    cmdclass={'test': PyTest},

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'PyGeoC=PyGeoC:main',
        ],
    },
)
| mit |
MTG/sms-tools | lectures/08-Sound-transformations/plots-code/hps-morph.py | 2 | 2709 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
# Make the sms-tools model/transformation modules importable relative to
# this script's location.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF

# Analysis settings for the first sound (violin).
inputFile1='../../../sounds/violin-B3.wav'
window1='blackman'
M1=1001
N1=1024
t1=-100
minSineDur1=0.05
nH=60
minf01=200
maxf01=300
f0et1=10
harmDevSlope1=0.01
stocf=0.1

# Analysis settings for the second sound (soprano voice).
inputFile2='../../../sounds/soprano-E4.wav'
window2='blackman'
M2=901
N2=1024
t2=-100
minSineDur2=0.05
minf02=250
maxf02=500
f0et2=10
harmDevSlope2=0.01

# Synthesis FFT size and hop size.
Ns = 512
H = 128

# Read both sounds and run harmonic-plus-stochastic analysis on each.
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)

# Morph interpolation factors over time (0 = sound 1, 1 = sound 2).
hfreqIntp = np.array([0, .5, 1, .5])
hmagIntp = np.array([0, .5, 1, .5])
stocIntp = np.array([0, .5, 1, .5])
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)

# Resynthesize the morphed sound and write it to disk.
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs1)
UF.wavwrite(y,fs1, 'hps-morph.wav')

# Plot harmonics (top row) and stochastic envelopes (bottom row) for
# both inputs and the morphed output at a single analysis frame.
plt.figure(figsize=(12, 9))
frame = 200
plt.subplot(2,3,1)
plt.vlines(hfreq1[frame,:], -100, hmag1[frame,:], lw=1.5, color='b')
plt.axis([0,5000, -80, -15])
plt.title('x1: harmonics')
plt.subplot(2,3,2)
plt.vlines(hfreq2[frame,:], -100, hmag2[frame,:], lw=1.5, color='r')
plt.axis([0,5000, -80, -15])
plt.title('x2: harmonics')
plt.subplot(2,3,3)
yhfreq[frame,:][yhfreq[frame,:]==0] = np.nan
plt.vlines(yhfreq[frame,:], -100, yhmag[frame,:], lw=1.5, color='c')
plt.axis([0,5000, -80, -15])
plt.title('y: harmonics')
stocaxis = (fs1/2)*np.arange(stocEnv1[0,:].size)/float(stocEnv1[0,:].size)
plt.subplot(2,3,4)
plt.plot(stocaxis, stocEnv1[frame,:], lw=1.5, marker='x', color='b')
plt.axis([0,20000, -73, -27])
plt.title('x1: stochastic')
plt.subplot(2,3,5)
plt.plot(stocaxis, stocEnv2[frame,:], lw=1.5, marker='x', color='r')
plt.axis([0,20000, -73, -27])
plt.title('x2: stochastic')
plt.subplot(2,3,6)
plt.plot(stocaxis, ystocEnv[frame,:], lw=1.5, marker='x', color='c')
plt.axis([0,20000, -73, -27])
plt.title('y: stochastic')
plt.tight_layout()
plt.savefig('hps-morph.png')
plt.show()
| agpl-3.0 |
kazemakase/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    """Load a 'DocumentClassification' MLComp dataset via sklearn's load_files.

    When set_ is given ('train'/'test'/...), load that subfolder of the
    dataset; otherwise load the dataset root.
    """
    if set_ is not None:
        dataset_path = os.path.join(dataset_path, set_)
    return load_files(dataset_path, metadata.get('description'), **kwargs)
# Map an MLComp dataset 'format' metadata value to its loader function.
LOADERS = {
    'DocumentClassification': _load_document_classification,
    # TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a datasets as downloaded from http://mlcomp.org

    Parameters
    ----------

    name_or_id : the integer id or the string name metadata of the MLComp
                 dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
                  are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
                  environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------

    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Raises
    ------
    ValueError
        If the root folder cannot be resolved, the dataset cannot be
        found, its metadata file is missing, or no loader exists for
        the dataset's declared format.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    TODO: implement zip dataset loading too
    """
    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    # Normalize the root path once, before any existence checks.
    mlcomp_root = os.path.normpath(
        os.path.abspath(os.path.expanduser(mlcomp_root)))

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # dataset lookup
    if isinstance(name_or_id, numbers.Integral):
        # id lookup: datasets are stored in folders named by their id
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        # name based lookup: scan each dataset's metadata file for a
        # matching "name: <name>" line (last match wins)
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # loading the dataset metadata ("key: value" lines)
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    # Dispatch to the format-specific loader.  (Renamed the local from
    # `format` to avoid shadowing the builtin; fixed 'unknow' typo.)
    format_name = metadata.get('format', 'unknown')
    loader = LOADERS.get(format_name)
    if loader is None:
        raise ValueError("No loader implemented for format: " + format_name)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
jswoboda/GeoDataPython | GeoData/utilityfuncs.py | 1 | 25206 | #!/usr/bin/env python
"""
Note: "cartesian" column order is x,y,z in the Nx3 matrix
This module holds a number of functions that can be used to read data into
GeoData objects. All of the function s have the following outputs
(data,coordnames,dataloc,sensorloc,times)
Outputs
data - A dictionary with keys that are the names of the data. The values
are numpy arrays. If the data comes from satilites the arrays are one
dimensional. If the data comes from sensors that are not moving the
values are NlxNt numpy arrays.
coordnames - The type of coordinate system.
dataloc - A Nlx3 numpy array of the location of the measurement.
sensorloc - The location of the sensor in WGS84 coordinates.
times - A Ntx2 numpy array of times. The first element is start of the
measurement the second element is the end of the measurement.
@author: John Swoboda
"""
from __future__ import division,absolute_import
from six import string_types,integer_types
import logging
import numpy as np
import tables as tb
import h5py
import posixpath
import scipy as sp
from astropy.io import fits
from pandas import DataFrame
from datetime import datetime
from dateutil.parser import parse
from pytz import UTC
#
from . import CoordTransforms as CT
from . import Path
# Use the pandas reshape path in readMad_hdf5 instead of the CPython
# index-lookup loop.
USEPANDAS = True #20x speedup vs CPython
# Canonical names/order of the values every reader function returns.
VARNAMES = ['data','coordnames','dataloc','sensorloc','times']
# Unix epoch as a timezone-aware datetime, for time conversions.
EPOCH = datetime(1970,1,1,0,0,0,tzinfo=UTC)
def readMad_hdf5 (filename, paramstr): #timelims=None
    """@author: Michael Hirsch / Anna Stuhlmacher
    madrigal h5 read in function for the python implementation of GeoData for Madrigal Sondrestrom data

    Input:
    filename path to hdf5 file
    list of parameters to look at written as strings

    Returns:
    dictionary with keys are the Madrigal parameter string, the value is an array
    rows are unique data locations (data_loc) = (rng, azm, el1)
    columns are unique times

    Here we use Pandas DataFrames internally to speed the reading process by 20+ times,
    while still passing out Numpy arrays
    """
    h5fn = Path(filename).expanduser()
    #%% read hdf5 file
    with h5py.File(str(h5fn), "r", libver='latest') as f:
        # NOTE(review): assumes Madrigal's '/Metadata/Experiment Parameters'
        # rows 7/8/9 hold instrument lat/lon/alt -- confirm for other files.
        lat,lon,sensor_alt = (f['/Metadata/Experiment Parameters'][7][1],
                              f['/Metadata/Experiment Parameters'][8][1],
                              f['/Metadata/Experiment Parameters'][9][1])
        D = f['/Data/Table Layout']
        filt_data = DataFrame(columns=['range','az','el','ut1','ut2'])
        # Prefer geodetic altitude if present; fall back to slant range.
        try:
            filt_data['range'] = D['gdalt']
        except ValueError:
            filt_data['range'] = D['range']
        filt_data['az'] = D['azm']
        filt_data['el'] = D['elm']
        filt_data['ut1'] = D['ut1_unix']
        filt_data['ut2'] = D['ut2_unix']
        # Pull each requested parameter column; skip (with a warning) any
        # that the file does not contain.
        for p in paramstr:
            if not p in D.dtype.names:
                logging.warning('{} is not a valid parameter name.'.format(p))
                continue
            filt_data[p] = D[p]
    #%% SELECT
    # Drop rows missing any of the location coordinates.
    filt_data.dropna(axis=0,how='any',subset=['range','az','el'],inplace=True)

    #create list of unique data location lists
    dataloc = filt_data[['range','az','el']].drop_duplicates()
    uniq_times = filt_data['ut1'].drop_duplicates().values

    #initialize and fill data dictionary with parameter arrays
    #notnan = filt_data.index
    if not USEPANDAS:
        # Plain-Python fallback: precompute lookup lists for row/column
        # index resolution in the loop below.
        all_loc=filt_data[['range','az','el']].values.tolist()
        all_times = filt_data['ut1'].values.tolist()
        dataloclist = dataloc.values.tolist()
        uniq_timeslist = uniq_times = filt_data['ut1'].drop_duplicates().values.tolist()
        maxcols = len(uniq_times); maxrows = len(dataloc)

    data = {}
    for p in paramstr:
        if USEPANDAS:
            # example of doing via numpy
            # filt_data has already been filtered for time and location with the isr parameter(s) riding along.
            #Just reshape it!
            # NOTE(review): assumes rows are ordered location-fastest so a
            # Fortran-order reshape yields (location x time) -- confirm.
            #NOTE: take off the .values to pass the DataFrame
            d1_in = filt_data[p].reshape((dataloc.shape[0], uniq_times.shape[0]), order='F')
            data[p] = DataFrame(data=d1_in, columns=uniq_times).values
        else:
            #example with CPython
            vec = filt_data[p].values #list of parameter pulled from all data
            arr = np.empty([maxrows,maxcols]) #converting the tempdata list into array form
            for t in range(vec.size):
                #row
                row = dataloclist.index(all_loc[t])
                #column-time
                col = uniq_timeslist.index(all_times[t])
                arr[row][col] = vec[t]
            data[p] = arr

    #get the sensor location (lat, long, rng)
    sensorloc = np.array([lat,lon,sensor_alt], dtype=float) #they are bytes so we NEED float!
    coordnames = 'Spherical'

    #NOTE temporarily passing dataloc as Numpy array till rest of program is updated to Pandas
    return (data,coordnames,dataloc.values,sensorloc,uniq_times)
def readSRI_h5(fn, params, timelims=None):
    """Read the SRI-format HDF5 files for RISR and PFISR.

    Parameters
    ----------
    fn : str or Path
        Path to the SRI HDF5 file.
    params : tuple or list of str
        Parameter names to read; valid names are the keys of ``pathdict``.
    timelims : 2-sequence, optional
        (lower, upper) POSIX time limits used to crop the time axis.

    Returns
    -------
    (data, coordnames, dataloc, sensorloc, times) in the GeoData convention.
    """
    assert isinstance(params, (tuple, list))
    h5fn = Path(fn).expanduser()
    coordnames = 'Spherical'

    # Map parameter name -> (HDF5 dataset path, (ion index, fit index) or None).
    # None means the dataset is already 3-D; otherwise the 5-D fitted array is
    # sliced down to 3-D.
    # NOTE(review): 'dNe' points at the Ne data (not an error dataset) -- confirm.
    pathdict = {'Ne': ('/FittedParams/Ne', None),
                'dNe': ('/FittedParams/Ne', None),
                'Vi': ('/FittedParams/Fits', (0, 3)),
                'dVi': ('/FittedParams/Errors', (0, 3)),
                'Ti': ('/FittedParams/Fits', (0, 1)),
                'dTi': ('/FittedParams/Errors', (0, 1)),
                'Te': ('/FittedParams/Fits', (-1, 1)),
                # BUGFIX: this key was 'Ti', which silently overwrote the ion
                # temperature entry above with the Te *error* dataset.
                'dTe': ('/FittedParams/Errors', (-1, 1))}

    with h5py.File(str(h5fn), 'r', libver='latest') as f:
        # Get the times and time lims
        times = f['/Time/UnixTime'].value
        # get the sensor location
        sensorloc = np.array([f['/Site/Latitude'].value,
                              f['/Site/Longitude'].value,
                              f['/Site/Altitude'].value])
        # Get the locations of the data points
        rng = f['/FittedParams/Range'].value / 1e3
        angles = f['/BeamCodes'][:, 1:3]

    nt = times.shape[0]
    if timelims is not None:
        times = times[(times[:, 0] >= timelims[0]) & (times[:, 1] < timelims[1]), :]
        nt = times.shape[0]

    # allaz, allel corresponds to rng.ravel()
    allaz = np.tile(angles[:, 0], rng.shape[1])
    allel = np.tile(angles[:, 1], rng.shape[1])
    dataloc = np.vstack((rng.ravel(), allaz, allel)).T

    # Read in the requested parameters, one flattened (space x time) array each.
    data = {}
    with h5py.File(str(h5fn), 'r', libver='latest') as f:
        for istr in params:
            if istr not in pathdict:
                logging.error('{} is not a valid parameter name.'.format(istr))
                continue
            curpath, curint = pathdict[istr]
            if curint is None:  # 3-D data
                tempdata = f[curpath]
            else:  # 5-D data -> 3-D data
                tempdata = f[curpath][:, :, :, curint[0], curint[1]]
            data[istr] = np.array([tempdata[iT, :, :].ravel() for iT in range(nt)]).T

    # remove rows whose location is NaN in the SRI file
    nanlog = np.any(np.isnan(dataloc), 1)
    keeplog = np.logical_not(nanlog)
    dataloc = dataloc[keeplog]
    for ikey in data:
        data[ikey] = data[ikey][keeplog]
    return (data, coordnames, dataloc, sensorloc, times)
def read_h5_main(filename):
    '''
    Read in the structured h5 file and return a list of its known variables.

    use caution with this function -- indexing dicts is less safe
    because the index order of dicts is not deterministic.

    The return value is ``[outlist[x] for x in VARNAMES]`` -- one entry per
    name in the module-level VARNAMES list, in that order.
    '''
    h5fn = Path(filename).expanduser()
    # NOTE(review): tb.openFile/walkGroups/listNodes are the PyTables 2.x API;
    # PyTables 3 renamed them open_file/walk_groups/list_nodes -- confirm the
    # pinned PyTables version before upgrading.
    with tb.openFile(str(h5fn)) as f:
        output={}
        # Read in all of the info from the h5 file and put it in a dictionary.
        for group in f.walkGroups(posixpath.sep):
            output[group._v_pathname]={}
            for array in f.listNodes(group, classname = 'Array'):
                output[group._v_pathname][array.name]=array.read()
    # find the base paths, which could be dictionaries or the base directory
    # outarr = [pathparts(ipath) for ipath in output.keys() if len(pathparts(ipath))>0]
    outlist = {}
    basekeys = output[posixpath.sep].keys()
    # Determine assign the entries to each entry in the list of variables.
    # Have to do this in order because of the input being a list instead of a dictionary

    # dictionary: group names that directly match a VARNAME become dict entries
    for ipath in output:
        if ipath[1:] in VARNAMES:
            outlist[ipath[1:]] = output[ipath]
            continue
    # for non-dictionary: arrays stored at the file root
    for k in basekeys:
        if k in VARNAMES:
            # Have to check for MATLAB type strings, for some reason python does not like to register them as strings
            curdata = output['/'][k]
            if isinstance(curdata,np.ndarray):
                if curdata.dtype.kind=='S':
                    curdata=str(curdata)
            outlist[k] = curdata
    newout = [outlist[x] for x in VARNAMES]
    return newout
def pathparts(path):
    '''Split a POSIX path into its component names, root-to-leaf.

    Mirrors repeated posixpath.split(): a path with a trailing slash (or an
    empty path) yields an empty list, matching the historical behavior.
    '''
    parts = []
    tail = None
    while tail != "":
        path, tail = posixpath.split(path)
        if tail:
            parts.append(tail)
    parts.reverse()
    return parts
def readOMTI(filename, paramstr):
    """Load OMTI optical data from an HDF5 file with a fixed, known layout.

    Because the dataset paths are known a priori, they are read directly,
    which is roughly 10% faster than going through pytables.
    """
    fpath = Path(filename).expanduser()
    coordnames = 'Cartesian'
    with h5py.File(str(fpath), 'r', libver='latest') as h5:
        # wrapped in a dict for legacy API compatibility
        optical = {'optical': h5['data/optical'].value}
        dataloc = CT.enu2cartisian(h5['dataloc'].value)
        sensorloc = h5['sensorloc'].value.squeeze()
        times = h5['times'].value
    return optical, coordnames, dataloc, sensorloc, times
def readIono(iono, coordtype=None):
    """ @author: John Swoboda
    Bring an instance of the IonoContainer class into GeoData, using the
    set up from the RadarDataSim codebase.

    Parameters
    ----------
    iono : IonoContainer
        Object exposing Param_Names, Param_List (nloc x nt x nparams),
        Coord_Vecs, Cart_Coords, Sensor_loc, Time_Vector and getDoppler().
    coordtype : str, optional
        'cartesian' or 'spherical'; if None it is inferred from iono.Coord_Vecs.

    Returns
    -------
    (paramdict, coordnames, coords, sensorloc, times) in the GeoData convention.
    """
    pnames = iono.Param_Names
    Param_List = iono.Param_List
    (nloc, nt) = Param_List.shape[:2]
    # Param_Names may be a (possibly 2-D) ndarray or a plain sequence.
    if isinstance(pnames, np.ndarray):
        if pnames.ndim > 1:
            ionkeys = pnames.flatten()
            Param_List = Param_List.reshape(nloc, nt, len(ionkeys))
        else:
            ionkeys = pnames
    else:
        ionkeys = pnames
    paramdict = {ikeys: Param_List[:, :, ikeyn] for ikeyn, ikeys in enumerate(ionkeys)}

    if 'Ti' not in ionkeys:
        # Derive a composite ion temperature as the density-weighted mean of
        # the per-species temperatures (Ti_<species> weighted by Ni_<species>).
        Nis = {}
        Tis = {}
        for ikey in ionkeys:
            if 'Ti_' == ikey[:3]:
                Tis[ikey[3:]] = paramdict[ikey]
            elif 'Ni_' == ikey[:3]:
                Nis[ikey[3:]] = paramdict[ikey]
        Nisum = np.zeros((nloc, nt), dtype=Param_List.dtype)
        Ti = np.zeros_like(Nisum)
        for ikey in Tis.keys():
            Ti = Tis[ikey] * Nis[ikey] + Ti
            Nisum = Nis[ikey] + Nisum
        # BUGFIX: this tested `len(Ti) != 0`, which is true whenever nloc > 0
        # and therefore divided by an all-zero Nisum when no species were
        # present; only add 'Ti' when at least one species temperature exists.
        if len(Tis) != 0:
            paramdict['Ti'] = Ti / Nisum

    # Get line of sight velocity
    if 'Vi' not in paramdict:
        paramdict['Vi'] = iono.getDoppler()

    if coordtype is None:
        if iono.Coord_Vecs == ['r', 'theta', 'phi']:
            coordnames = 'Spherical'
            coords = CT.cartisian2Sphereical(iono.Cart_Coords)
        elif iono.Coord_Vecs == ['x', 'y', 'z']:
            coordnames = 'Cartesian'
            coords = iono.Cart_Coords
        else:
            # previously fell through to an UnboundLocalError; fail clearly
            raise ValueError('Unrecognized Coord_Vecs: {}'.format(iono.Coord_Vecs))
    elif coordtype.lower() == 'cartesian':
        coordnames = 'Cartesian'
        coords = iono.Cart_Coords
    elif coordtype.lower() == 'spherical':
        coordnames = 'Spherical'
        coords = CT.cartisian2Sphereical(iono.Cart_Coords)
    else:
        raise ValueError('Unrecognized coordtype: {}'.format(coordtype))
    return (paramdict, coordnames, coords, np.array(iono.Sensor_loc), iono.Time_Vector)
#data, coordnames, dataloc, sensorloc, times = readMad_hdf5('/Users/anna/Research/Ionosphere/2008WorldDaysPDB/son081001g.001.hdf5', ['ti', 'dti', 'nel'])
def readAllskyFITS(flist, azelfn, heightkm, timelims=[-np.inf, np.inf]):
    """ :author: Michael Hirsch, Greg Starr
    For example, this works with Poker Flat DASC all-sky, FITS data available from:
    https://amisr.asf.alaska.edu/PKR/DASC/RAW/
    This function will read a FITS file into the proper GeoData variables.
    inputs:
    ------
    flist - A list of Fits files that will be read in.
    azelfn - A tuple of file names for az,el map files, or None to skip the
        az/el mapping (dataloc is then returned as None).
    heightkm - The height the data will be projected on to in km
    timelims - A list of time limits in POSIX, the first element is the lower
        limit, the second is the upper limit.
    """
    # BUGFIX: the Path() conversion previously ran unconditionally, so the
    # supported azelfn=None case crashed before its None check was reached.
    if azelfn is not None:
        azelfn = [Path(f).expanduser() for f in azelfn]
        assert isinstance(heightkm, (integer_types, float)), 'specify one altitude'
        assert isinstance(azelfn, (tuple, list)) and len(azelfn) == 2, 'You must specify BOTH of the az/el files'

    if isinstance(flist, string_types):
        flist = [flist]
    assert isinstance(flist, (list, tuple)) and len(flist) > 0, 'I did not find any image files to read'

    #%% priming read: grab image size/dtype and the site location
    with fits.open(str(flist[0]), mode='readonly') as h:
        img = h[0].data
        sensorloc = np.array([h[0].header['GLAT'], h[0].header['GLON'], 0.])  # TODO real sensor altitude in km

    if isinstance(timelims[0], datetime):
        timelims = [(t - EPOCH).total_seconds() for t in timelims]

    #%% search through the times to see if anything is between the limits
    times = []
    flist2 = []
    for f in flist:
        try:  # KEEP THIS try
            with fits.open(str(f), mode='readonly') as h:
                expstart_dt = parse(h[0].header['OBSDATE'] + ' ' + h[0].header['OBSSTART'] + 'Z')  # implied UTC
                expstart_unix = (expstart_dt - EPOCH).total_seconds()
                if (expstart_unix >= timelims[0]) & (expstart_unix <= timelims[1]):
                    times.append([expstart_unix, expstart_unix + h[0].header['EXPTIME']])
                    flist2.append(f)
        except OSError as e:
            logging.info('trouble reading time from {} {}'.format(f, e))  # so many corrupted files, we opt for INFO instead of WARNING
    times = np.array(times)

    #%% read in the data that is in between the time limits
    img = np.empty((img.size, len(flist2)), dtype=img.dtype)  # len(flist2) == len(times)
    iok = np.zeros(len(flist2)).astype(bool)
    for i, f in enumerate(flist2):
        try:
            with fits.open(str(f), mode='readonly') as h:
                img[:, i] = np.rot90(h[0].data, 1).ravel()
                iok[i] = True
                if not (i % 200) and i > 0:
                    print('{}/{} FITS allsky read'.format(i + 1, len(flist2)))
        except OSError as e:
            logging.error('trouble reading images from {} {}'.format(f, e))

    #%% keep only good times
    img = img[:, iok]
    times = times[iok, :]
    #%%
    coordnames = "spherical"
    if azelfn:
        with fits.open(str(azelfn[0]), mode='readonly') as h:
            az = h[0].data
        with fits.open(str(azelfn[1]), mode='readonly') as h:
            el = h[0].data
        #%% Get rid of bad data: pixels where the az map has a huge gradient
        # (map discontinuities) or where both az and el are exactly zero.
        grad_thresh = 15.
        (Fx, Fy) = np.gradient(az)
        bad_datalog = np.hypot(Fx, Fy) > grad_thresh
        zerodata = bad_datalog | ((az == 0.) & (el == 0.))
        keepdata = ~(zerodata.ravel())
        optical = {'image': img[keepdata]}
        elfl = el.ravel()[keepdata]
        sinel = np.sin(np.radians(elfl))
        dataloc = np.empty((keepdata.sum(), 3))
        dataloc[:, 0] = np.ones_like(sinel) * heightkm / sinel  # ALTITUDE (slant range to projection height)
        dataloc[:, 1] = az.ravel()[keepdata]  # AZIMUTH
        dataloc[:, 2] = el.ravel()[keepdata]  # ELEVATION
    else:  # external program does the az/el mapping
        az = el = dataloc = None
        optical = {'image': img}
    return optical, coordnames, dataloc, sensorloc, times
def readNeoCMOS(imgfn, azelfn, heightkm=None,treq=None):
    """
    Read Neo sCMOS HDF5 imagery plus an az/el mapping file into GeoData form.

    treq is pair or vector of UT1 unix epoch times to load--often file is so large we can't load all frames into RAM.
    assumes that /rawimg is a 3-D array Nframe x Ny x Nx

    Returns (optical, coordnames, dataloc, sensorloc, times[mask]).
    """
    #assert isinstance(heightkm,(integer_types,float))
    imgfn = Path(imgfn).expanduser()
    azelfn = Path(azelfn).expanduser()
    #%% load data
    with h5py.File(str(azelfn),'r',libver='latest') as f:
        az = f['/az'].value
        el = f['/el'].value
    with h5py.File(str(imgfn),'r',libver='latest') as f:
        times = f['/ut1_unix'].value
        sensorloc = f['/sensorloc'].value
        if sensorloc.dtype.fields is not None: #recarray
            # collapse the record array into a plain float vector
            sensorloc = sensorloc.view((float, len(sensorloc.dtype.names))).squeeze()
        npix = np.prod(f['/rawimg'].shape[1:]) #number of pixels in one image
        dataloc = np.empty((npix,3))
        if treq is not None:
            # note float() casts datetime64 to unix epoch for 'ms'
            if isinstance(treq[0],np.datetime64):
                treq = treq.astype(float)
            elif isinstance(treq[0],datetime):
                treq = np.array([(t-EPOCH).total_seconds() for t in treq])
            # boolean frame selector for the requested time window
            mask = (treq[0] <= times) & (times <= treq[-1])
        else: #load all
            mask = np.ones(f['/rawimg'].shape[0]).astype(bool)
        # RAM estimate assumes 2 bytes per pixel -- TODO confirm dtype
        if mask.sum()*npix*2 > 1e9: # RAM
            logging.warning('trying to load {:.1f} GB of image data, your program may crash'.format(mask.sum()*npix*2/1e9))
        assert mask.sum()>0,'no times in {} within specified times.'.format(imgfn)
        imgs = f['/rawimg'][mask,...]
        #%% plate scale: apply the same orientation fixes to images and az/el maps
        if f['/params']['transpose']:
            imgs = imgs.transpose(0,2,1)
            az = az.T
            el = el.T
        if f['/params']['rotccw']: #NOT isinstance integer_types!
            imgs = np.rot90(imgs.transpose(1,2,0),k=f['/params']['rotccw']).transpose(2,0,1)
            az = np.rot90(az,k=f['/params']['rotccw'])
            el = np.rot90(el,k=f['/params']['rotccw'])
        if f['/params']['fliplr']:
            imgs = np.fliplr(imgs)
            az = np.fliplr(az)
            el = np.fliplr(el)
        if f['/params']['flipud']:
            imgs = np.flipud(imgs.transpose(1,2,0)).transpose(2,0,1)
            az = np.flipud(az)
            el = np.flipud(el)
    optical = {'optical':imgs}
    coordnames = 'spherical'
    # NOTE(review): heightkm=None will fail on the assignment below -- callers
    # appear to be expected to always pass an altitude despite the default.
    dataloc[:,0] = heightkm
    dataloc[:,1] = az.ravel()
    dataloc[:,2] = el.ravel()
    return optical, coordnames, dataloc, sensorloc, times[mask]
def readAVI(fn, fwaem):
    """Read an AVI movie plus an az/el mapping .mat file into GeoData form.

    caution: this was for a one-off test. Needs a bit of touch-up to be
    generalized to all files.

    Parameters
    ----------
    fn : str
        Path of the AVI file.
    fwaem : str
        Path of the .mat file holding the 'az' and 'el' mapping arrays.
    """
    import cv2
    vid = cv2.VideoCapture(fn)
    # CAP_PROP_* getters return floats; cast so they can size and index arrays.
    width = int(vid.get(3))   # CAP_PROP_FRAME_WIDTH
    height = int(vid.get(4))  # CAP_PROP_FRAME_HEIGHT
    fps = vid.get(5)          # CAP_PROP_FPS
    fcount = int(vid.get(7))  # CAP_PROP_FRAME_COUNT
    # data: one flattened frame per column
    # NOTE(review): assumes single-channel frames; a color AVI yields
    # width*height*3 samples per frame -- TODO confirm input format.
    data = np.zeros((width * height, fcount))
    # BUGFIX: columns were indexed with vid.get(1) (position of the *next*
    # frame), which skipped column 0 and overran the final column.
    icol = 0
    while True:
        ok, frame = vid.read()
        if not ok:
            break
        data[:, icol] = frame.flatten()
        icol += 1
    data = {'image': data}
    # coordnames
    coordnames = "spherical"
    # dataloc
    dataloc = np.zeros((width * height, 3))
    mapping = sp.io.loadmat(fwaem)
    dataloc[:, 2] = mapping['el'].flatten()
    dataloc[:, 1] = mapping['az'].flatten()
    # NOTE(review): np.cos expects radians but (90 - el) looks like degrees
    # -- confirm intended units before reuse.
    dataloc[:, 0] = 120 / np.cos(90 - mapping['el'].flatten())
    # sensorloc
    sensorloc = np.array([65, -148, 0])
    # times: one (start, stop) POSIX pair per frame.
    # BUGFIX: the original allocated fcount+1 rows and assigned arange slices
    # of mismatched length into them; build the two columns consistently.
    begin = (datetime(2007, 3, 23, 11, 20, 5) - datetime(1970, 1, 1, 0, 0, 0)).total_seconds()
    step = 1.0 / fps
    starts = begin + step * np.arange(fcount)
    times = np.column_stack((starts, starts + step))
    return data, coordnames, dataloc, sensorloc, times
#%% Mahali
def readIonofiles(filename):
    """Read a GPS TEC "iono" text file into the GeoData convention.

    iono file format (whitespace-delimited columns):
    1) time (as float day of year 0.0 - 366.0)
    2) year
    3) rec. latitude
    4) rec. longitude
    5) line-of-sight tec (TECu)
    6) error in line-of-sight tec (TECu)
    7) vertical tec (TECu)
    8) azimuth to satellite
    9) elevation to satellite
    10) mapping function (line of sight / vertical)
    11) pierce point latitude (350 km)
    12) pierce point longitude (350 km)
    13) satellite number (1-32)
    14) site (4 char)
    15) recBias (TECu)
    16) recBiasErr(TECu)

    Returns
    -------
    (data, coordnames, dataloc, sensorloc, uttime) in the GeoData convention.
    """
    data = np.genfromtxt(filename).T  # NOTE this takes a long time, new data uses HDF5
    #%% Get in GeoData format
    doy = data[0]
    year = data[1].astype(int)
    # BUGFIX: the homogeneity test compared against year[1], which raises
    # IndexError for a single-record file; compare against year[0].
    if (year == year[0]).all():
        unixyear = (datetime(int(year[0]), 1, 1, 0, 0, 0, tzinfo=UTC) - EPOCH).total_seconds()
        # Making the difference in time to be a minute
        uttime = unixyear + 24 * 3600 * np.column_stack((doy, doy + 1. / 24. / 60.))
    else:
        (y_u, y_iv) = np.unique(year, return_inverse=True)
        unixyearu = np.array([(datetime(int(iy), 1, 1, 0, 0, 0, tzinfo=UTC) - EPOCH).total_seconds()
                              for iy in y_u])
        unixyear = unixyearu[y_iv]
        # NOTE(review): this branch uses a one-*day* interval (doy+1) while
        # the single-year branch uses one minute -- confirm which is intended.
        uttime = unixyear + 24 * 3600 * np.column_stack((doy, doy + 1))
    reclat = data[2]
    reclong = data[3]
    TEC = data[4]
    nTEC = data[5]
    vTEC = data[6]
    az2sat = data[7]
    el2sat = data[8]
    # mapfunc = data[9]
    piercelat = data[10]
    piercelong = data[11]
    satnum = data[12]
    # site = data[13]  (non-numeric 4-char field; genfromtxt reads it as nan)
    recBias = data[14]
    nrecBias = data[15]
    data = {'TEC': TEC, 'nTEC': nTEC, 'vTEC': vTEC, 'recBias': recBias,
            'nrecBias': nrecBias, 'satnum': satnum, 'az2sat': az2sat,
            'el2sat': el2sat, 'rlat': reclat, 'rlong': reclong}
    coordnames = 'WGS84'
    sensorloc = np.nan * np.ones(3)  # receiver location is not a single point here
    dataloc = np.column_stack((piercelat, piercelong, 350e3 * np.ones_like(piercelat)))
    return (data, coordnames, dataloc, sensorloc, uttime)
def readMahalih5(filename, des_site):
    """Read the Mahali GPS data into a GeoData data structure.

    The user only has to give a filename and name of the desired site.

    Parameters
    ----------
    filename : str
        Name of the h5 file.
    des_site : str
        The site name; should be listed in the h5 file in the table sites.

    Returns
    -------
    (data, coordnames, dataloc, sensorloc, uttime) in the GeoData convention.
    """
    h5fn = Path(filename).expanduser()

    with h5py.File(str(h5fn), "r", libver='latest') as f:
        # row indices belonging to the requested site
        despnts = np.where(f['data']['site'] == des_site)[0]
        doy = f['data']['time'][despnts]
        # TODO: year is hard coded for now
        year = 2015 * np.ones_like(doy, dtype=int)

        TEC = f['data']['los_tec'][despnts]
        nTEC = f['data']['err_los_tec'][despnts]
        vTEC = f['data']['vtec'][despnts]
        az2sat = f['data']['az'][despnts]
        # BUGFIX: elevation was mistakenly read from the 'az' column.
        el2sat = f['data']['el'][despnts]
        piercelat = f['data']['pplat'][despnts]
        piercelong = f['data']['pplon'][despnts]
        satnum = f['data']['prn'][despnts]
        recBias = f['data']['rec_bias'][despnts]
        nrecBias = f['data']['err_rec_bias'][despnts]

    # Make the integration time on the order of 15 seconds.
    # BUGFIX: compare against year[0]; year[1] fails for a single record.
    if (year == year[0]).all():
        unixyear = (datetime(2015, 1, 1, 0, 0, 0, tzinfo=UTC) - EPOCH).total_seconds()
        uttime = unixyear + np.round(24 * 3600 * np.column_stack((doy, doy + 15. / 24. / 3600.)))
    else:
        (y_u, y_iv) = np.unique(year, return_inverse=True)
        unixyearu = np.array([(datetime(int(iy), 1, 1, 0, 0, 0, tzinfo=UTC) - EPOCH).total_seconds()
                              for iy in y_u])
        unixyear = unixyearu[y_iv]
        uttime = unixyear + 24 * 3600 * np.column_stack((doy, doy + 15. / 24. / 3600.))

    data = {'TEC': TEC, 'nTEC': nTEC, 'vTEC': vTEC, 'recBias': recBias,
            'nrecBias': nrecBias, 'satnum': satnum, 'az2sat': az2sat, 'el2sat': el2sat}
    coordnames = 'WGS84'
    sensorloc = np.nan * np.ones(3)
    dataloc = np.column_stack((piercelat, piercelong, 350e3 * np.ones_like(piercelat)))
    return (data, coordnames, dataloc, sensorloc, uttime)
| mit |
michrawson/nyu_ml_lectures | notebooks/figures/plot_digits_datasets.py | 19 | 2750 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
    """Show a mosaic of raw digit images and their 2-D PCA projection."""
    digits = datasets.load_digits(n_class=6)
    n_digits = 500
    X = digits.data[:n_digits]
    y = digits.target[:n_digits]
    n_samples, n_features = X.shape
    n_neighbors = 30  # NOTE(review): unused in this function

    def plot_embedding(X, title=None):
        """Scatter a 2-D embedding, drawing each point as its digit label."""
        # rescale the embedding to the unit square
        x_min, x_max = np.min(X, 0), np.max(X, 0)
        X = (X - x_min) / (x_max - x_min)
        plt.figure()
        ax = plt.subplot(111)
        for i in range(X.shape[0]):
            plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                     color=plt.cm.Set1(y[i] / 10.),
                     fontdict={'weight': 'bold', 'size': 9})
        if hasattr(offsetbox, 'AnnotationBbox'):
            # only print thumbnails with matplotlib > 1.0
            shown_images = np.array([[1., 1.]])  # just something big
            for i in range(X.shape[0]):
                dist = np.sum((X[i] - shown_images) ** 2, 1)
                if np.min(dist) < 1e5:
                    # don't show points that are too close
                    # set a high threshold to basically turn this off
                    continue
                shown_images = np.r_[shown_images, [X[i]]]
                imagebox = offsetbox.AnnotationBbox(
                    offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                    X[i])
                ax.add_artist(imagebox)
        plt.xticks([]), plt.yticks([])
        if title is not None:
            plt.title(title)

    # draw a 10x10 mosaic of raw 8x8 digit images with a 1-pixel border
    n_img_per_row = 10
    img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
    for i in range(n_img_per_row):
        ix = 10 * i + 1
        for j in range(n_img_per_row):
            iy = 10 * j + 1
            img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
    plt.imshow(img, cmap=plt.cm.binary)
    plt.xticks([])
    plt.yticks([])
    plt.title('A selection from the 64-dimensional digits dataset')
    print("Computing PCA projection")
    pca = decomposition.PCA(n_components=2).fit(X)
    X_pca = pca.transform(X)
    plot_embedding(X_pca, "Principal Components projection of the digits")
    # visualize the first two principal components themselves as 8x8 images
    plt.figure()
    plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
    plt.axis('off')
    plt.figure()
    plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
    plt.axis('off')
    plt.show()
| cc0-1.0 |
GuessWhoSamFoo/pandas | pandas/tests/indexes/multi/test_integrity.py | 1 | 9162 | # -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.compat import lrange, range
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
    # GH 8456: codes should use the smallest sufficient integer dtype.
    mi = MultiIndex.from_tuples([('A', 1), ('A', 2)])
    assert mi.codes[0].dtype == 'int8'
    assert mi.codes[1].dtype == 'int8'

    # the codes dtype widens with the number of level values
    for size, expected in [(40, 'int8'), (400, 'int16'), (40000, 'int32')]:
        mi = MultiIndex.from_product([['a'], range(size)])
        assert mi.codes[1].dtype == expected

    # codes are non-negative for fully observed values
    mi = pd.MultiIndex.from_product([['a'], range(1000)])
    assert (mi.codes[0] >= 0).all()
    assert (mi.codes[1] >= 0).all()
def test_values_boxed():
    # Timestamps inside a MultiIndex should come back boxed, not as datetime64.
    tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
              (3, pd.Timestamp('2000-01-03')),
              (1, pd.Timestamp('2000-01-04')),
              (2, pd.Timestamp('2000-01-02')),
              (3, pd.Timestamp('2000-01-03'))]
    expected = construct_1d_object_array_from_listlike(tuples)
    mi = pd.MultiIndex.from_tuples(tuples)
    tm.assert_numpy_array_equal(mi.values, expected)
    # Check that code branches for boxed values produce identical results
    tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values)
def test_values_multiindex_datetimeindex():
    # Test to ensure we hit the boxing / nobox part of MI.values
    ints = np.arange(10 ** 18, 10 ** 18 + 5)  # magnitudes valid as datetime64[ns]
    naive = pd.DatetimeIndex(ints)
    # TODO(GH-24559): Remove the FutureWarning
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        aware = pd.DatetimeIndex(ints, tz='US/Central')

    idx = pd.MultiIndex.from_arrays([naive, aware])
    result = idx.values

    # elements come back as tuples of boxed Timestamps; rebuild each level
    outer = pd.DatetimeIndex([x[0] for x in result])
    tm.assert_index_equal(outer, naive)

    inner = pd.DatetimeIndex([x[1] for x in result])
    tm.assert_index_equal(inner, aware)

    # n_lev > n_lab: slicing keeps full levels but fewer labels
    result = idx[:2].values

    outer = pd.DatetimeIndex([x[0] for x in result])
    tm.assert_index_equal(outer, naive[:2])

    inner = pd.DatetimeIndex([x[1] for x in result])
    tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
    # Test to ensure we hit the boxing / nobox part of MI.values
    ints = np.arange(2007, 2012)
    pidx = pd.PeriodIndex(ints, freq='D')
    mi = pd.MultiIndex.from_arrays([ints, pidx])

    def check(values, exp_ints, exp_periods):
        # rebuild each level from the tuples and compare
        tm.assert_index_equal(pd.Int64Index([x[0] for x in values]), exp_ints)
        tm.assert_index_equal(pd.PeriodIndex([x[1] for x in values]), exp_periods)

    check(mi.values, pd.Int64Index(ints), pidx)
    # n_lev > n_lab
    check(mi[:2].values, pd.Int64Index(ints[:2]), pidx[:2])
def test_consistency():
    # need to construct an overflow of the hash space
    major_axis = list(range(70000))
    minor_axis = list(range(10))

    major_codes = np.arange(70000)
    minor_codes = np.repeat(list(range(10)), 7000)

    # the fact that this works means it's consistent
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])

    # inconsistent: repeated (major, minor) pairs
    major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])
    assert index.is_unique is False
def test_hash_collisions():
    # non-smoke test that we don't get hash collisions
    mi = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
                                 names=['one', 'two'])
    indexer = mi.get_indexer(mi.values)
    tm.assert_numpy_array_equal(indexer, np.arange(len(mi), dtype='intp'))

    # spot-check both ends of the index
    for pos in (0, 1, len(mi) - 2, len(mi) - 1):
        assert mi.get_loc(mi[pos]) == pos
def test_dims():
    # placeholder -- no dimension checks implemented yet
    pass
def take_invalid_kwargs():
    # NOTE(review): this function is missing the ``test_`` prefix, so pytest
    # never collects it -- presumably it should be test_take_invalid_kwargs.
    vals = [['A', 'B'],
            [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
    idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
    indices = [1, 2]

    # unknown keyword arguments must raise TypeError
    msg = r"take\(\) got an unexpected keyword argument 'foo'"
    with pytest.raises(TypeError, match=msg):
        idx.take(indices, foo=2)

    # numpy-compat keywords that pandas does not support must raise ValueError
    msg = "the 'out' parameter is not supported"
    with pytest.raises(ValueError, match=msg):
        idx.take(indices, out=indices)

    msg = "the 'mode' parameter is not supported"
    with pytest.raises(ValueError, match=msg):
        idx.take(indices, mode='clip')
def test_isna_behavior(idx):
    # ``idx`` is the standard MultiIndex fixture.
    # should not segfault GH5123
    # NOTE: if MI representation changes, may make sense to allow
    # isna(MI)
    with pytest.raises(NotImplementedError):
        pd.isna(idx)
def test_large_multiindex_error():
    # GH12527: missing-key lookups must raise KeyError on both sides of the
    # one-million-row engine cutoff.
    for nrows in (499999, 500001):
        df = pd.DataFrame(
            1, index=pd.MultiIndex.from_product([[1, 2], range(nrows)]),
            columns=['dest'])
        for key in ((-1, 0), (3, 0)):
            with pytest.raises(KeyError):
                df.loc[key, 'dest']
def test_million_record_attribute_error():
    # GH 18165: a bogus attribute on a Series backed by a huge MultiIndex
    # must raise AttributeError cleanly rather than hanging or crashing.
    values = list(range(1000000))
    df = pd.DataFrame({'a': values, 'b': values},
                      index=pd.MultiIndex.from_tuples([(v, v) for v in values]))

    msg = "'Series' object has no attribute 'foo'"
    with pytest.raises(AttributeError, match=msg):
        df['a'].foo()
def test_can_hold_identifiers(idx):
    # the first label of the fixture index must be accepted as a held name
    key = idx[0]
    assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
    # levels, codes and names expose read-only views; any in-place write raises
    levels, codes = idx.levels, idx.codes
    # shouldn't be able to set at either the top level or base level
    mutable_regex = re.compile('does not support mutable operations')
    with pytest.raises(TypeError, match=mutable_regex):
        levels[0] = levels[0]
    with pytest.raises(TypeError, match=mutable_regex):
        levels[0][0] = levels[0][0]
    # ditto for labels
    with pytest.raises(TypeError, match=mutable_regex):
        codes[0] = codes[0]
    with pytest.raises(TypeError, match=mutable_regex):
        codes[0][0] = codes[0][0]
    # and for names
    names = idx.names
    with pytest.raises(TypeError, match=mutable_regex):
        names[0] = names[0]
def test_level_setting_resets_attributes():
    ind = pd.MultiIndex.from_arrays([
        ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
    ])
    assert ind.is_monotonic
    # NOTE(review): set_levels(..., inplace=True) is deprecated/removed in
    # newer pandas -- confirm the supported version range for this suite.
    ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
    # if this fails, probably didn't reset the cache correctly.
    assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
    # GH 12893: concat of stacked RangeIndex frames must not corrupt the index
    foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
    bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
    df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
    df.index.names = ['fizz', 'buzz']

    str(df)  # smoke-test the repr path
    expected = pd.DataFrame({'bar': np.arange(100),
                             'foo': np.arange(100)},
                            index=pd.MultiIndex.from_product(
                                [range(10), range(10)],
                                names=['fizz', 'buzz']))
    tm.assert_frame_equal(df, expected, check_like=True)

    # each level must still be retrievable by name
    result = df.index.get_level_values('fizz')
    expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
    tm.assert_index_equal(result, expected)

    result = df.index.get_level_values('buzz')
    expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')
    tm.assert_index_equal(result, expected)
def test_hash_error(indices):
    # indexes are container-like and therefore must be unhashable
    index = indices
    with pytest.raises(TypeError, match=("unhashable type: %r" %
                                         type(index).__name__)):
        hash(indices)
def test_mutability(indices):
    # Index objects are immutable; item assignment must raise TypeError.
    if not len(indices):
        return
    with pytest.raises(TypeError):
        indices[0] = indices[0]
def test_wrong_number_names(indices):
    # assigning a names list whose length mismatches nlevels must raise
    with pytest.raises(ValueError, match="^Length"):
        indices.names = ["apple", "banana", "carrot"]
def test_memory_usage(idx):
    result = idx.memory_usage()
    if len(idx):
        # touching get_loc populates the hash-engine cache, growing usage
        idx.get_loc(idx[0])
        result2 = idx.memory_usage()
        result3 = idx.memory_usage(deep=True)

        # RangeIndex, IntervalIndex
        # don't have engines
        if not isinstance(idx, (RangeIndex, IntervalIndex)):
            assert result2 > result

        if idx.inferred_type == 'object':
            # deep=True additionally counts the referenced Python objects
            assert result3 > result2

    else:
        # we report 0 for no-length
        assert result == 0
def test_nlevels(idx):
    # the standard fixture index has exactly two levels
    assert idx.nlevels == 2
| bsd-3-clause |
jzt5132/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# dense evaluation grid covering the plotting window
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUGFIX: contour() has no 'linetypes' keyword; 'linestyles' is the correct
# argument for the dashed zero-level decision boundary.
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
wangjohn/wallace | wallace/predictive_models/gradient_boosting_regression.py | 1 | 1550 | from sklearn import ensemble
from wallace.predictive_models.sklearn_model import SklearnModel, TrainedSklearnModel
from wallace.parameters import ParametersGeneralValidityCheck
class GradientBoostingRegression(SklearnModel):
    """Wallace wrapper around sklearn's GradientBoostingRegressor."""

    def train(self, dataset):
        """Fit a gradient-boosting regressor on *dataset* and wrap the result."""
        regressor = ensemble.GradientBoostingRegressor(
            learning_rate=self.get_learning_rate(),
            n_estimators=self.get_number_estimators(),
            max_depth=self.get_max_depth())
        fitted = regressor.fit(self.get_independent_variable_data(dataset),
                               self.get_dependent_variable_data(dataset))
        return TrainedSklearnModel(self, fitted)

    @classmethod
    def validity_check(klass):
        """Declare the valid ranges for this model's tunable parameters."""
        check = ParametersGeneralValidityCheck()
        check.set_range_parameter("gradient_boosting_regression.learning_rate", 0.0, 1.0)
        check.set_integer_range_parameter("gradient_boosting_regression.number_estimators", 1, 1000)
        check.set_integer_range_parameter("gradient_boosting_regression.max_depth", 1, 100)
        return check

    def get_number_estimators(self):
        """Number of boosting stages to fit."""
        return self.parameter_set.get("gradient_boosting_regression.number_estimators")

    def get_learning_rate(self):
        """Shrinkage applied to each tree's contribution."""
        return self.parameter_set.get("gradient_boosting_regression.learning_rate")

    def get_max_depth(self):
        """Maximum depth of the individual regression trees."""
        return self.parameter_set.get("gradient_boosting_regression.max_depth")
| mit |
leesavide/pythonista-docs | Documentation/matplotlib/examples/misc/rec_groupby_demo.py | 9 | 2060 | from __future__ import print_function
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
# load the bundled AAPL price history into a record array
datafile = cbook.get_sample_data('aapl.csv', asfileobj=False)
print('loading', datafile)
# NOTE(review): mlab.csv2rec was removed in matplotlib 3.x -- this demo
# requires an older matplotlib.
r = mlab.csv2rec(datafile)
r.sort()  # sort records in place (chronologically, assuming date is first)
def daily_return(prices):
    'an array of daily returns from price array'
    # first element stays 0 (no previous day); same dtype as the input
    returns = np.zeros_like(prices)
    prev = prices[:-1]
    returns[1:] = (prices[1:] - prev) / prev
    return returns
def volume_code(volume):
    """Bucket the continuous volume data into categorical codes.

    NOTE(review): 10e6 == 1e7, so the last two bin edges coincide and
    code 4 is unreachable (values above 1e7 get code 5). Kept as-is to
    preserve the original categories.
    """
    edges = [1e5, 1e6, 5e6, 10e6, 1e7]
    return np.searchsorted(edges, volume)
# a list of (dtype_name, summary_function, output_dtype_name).
# rec_summarize will call on each function on the indicated recarray
# attribute, and the result assigned to output name in the return
# record array.
summaryfuncs = (
    ('date', lambda x: [thisdate.year for thisdate in x], 'years'),
    ('date', lambda x: [thisdate.month for thisdate in x], 'months'),
    ('date', lambda x: [thisdate.weekday() for thisdate in x], 'weekday'),
    ('adj_close', daily_return, 'dreturn'),
    ('volume', volume_code, 'volcode'),
    )

# Derived columns (years/months/weekday/dreturn/volcode) are appended to r.
rsum = mlab.rec_summarize(r, summaryfuncs)

# stats is a list of (dtype_name, function, output_dtype_name).
# rec_groupby will summarize the attribute identified by the
# dtype_name over the groups in the groupby list, and assign the
# result to the output_dtype_name
stats = (
    ('dreturn', len, 'rcnt'),
    ('dreturn', np.mean, 'rmean'),
    ('dreturn', np.median, 'rmedian'),
    ('dreturn', np.std, 'rsigma'),
    )

# you can summarize over a single variable, like years or months
print('summary by years')
ry = mlab.rec_groupby(rsum, ('years',), stats)
print(mlab.rec2txt(ry))

print('summary by months')
rm = mlab.rec_groupby(rsum, ('months',), stats)
print(mlab.rec2txt(rm))

# or over multiple variables like years and months
print('summary by year and month')
rym = mlab.rec_groupby(rsum, ('years','months'), stats)
print(mlab.rec2txt(rym))

print('summary by volume')
rv = mlab.rec_groupby(rsum, ('volcode',), stats)
print(mlab.rec2txt(rv))
| apache-2.0 |
Keleir/glances | glances/outputs/glances_curses.py | 11 | 44601 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Curses interface class."""
# Import system lib
import re
import sys
# Import Glances lib
from glances.core.glances_globals import is_mac, is_windows
from glances.core.glances_logging import logger
from glances.core.glances_logs import glances_logs
from glances.core.glances_processes import glances_processes
from glances.core.glances_timer import Timer
# Import curses lib for "normal" operating system and consolelog for Windows.
# On Windows the stdlib curses is unavailable, so a lightweight shim
# (WCurseLight) is bound to the `curses` name instead.
if not is_windows:
    try:
        import curses
        import curses.panel
        from curses.textpad import Textbox
    except ImportError:
        logger.critical(
            "Curses module not found. Glances cannot start in standalone mode.")
        sys.exit(1)
else:
    from glances.outputs.glances_colorconsole import WCurseLight
    curses = WCurseLight()
class _GlancesCurses(object):
"""This class manages the curses display (and key pressed).
Note: It is a private class, use GlancesCursesClient or GlancesCursesBrowser.
"""
def __init__(self, args=None):
# Init args
self.args = args
# Init windows positions
self.term_w = 80
self.term_h = 24
# Space between stats
self.space_between_column = 3
self.space_between_line = 2
# Init the curses screen
self.screen = curses.initscr()
if not self.screen:
logger.critical("Cannot init the curses library.\n")
sys.exit(1)
# Set curses options
if hasattr(curses, 'start_color'):
curses.start_color()
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'noecho'):
curses.noecho()
if hasattr(curses, 'cbreak'):
curses.cbreak()
self.set_cursor(0)
# Init colors
self.hascolors = False
if curses.has_colors() and curses.COLOR_PAIRS > 8:
self.hascolors = True
# FG color, BG color
if args.theme_white:
curses.init_pair(1, curses.COLOR_BLACK, -1)
else:
curses.init_pair(1, curses.COLOR_WHITE, -1)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(5, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
curses.init_pair(6, curses.COLOR_RED, -1)
curses.init_pair(7, curses.COLOR_GREEN, -1)
curses.init_pair(8, curses.COLOR_BLUE, -1)
try:
curses.init_pair(9, curses.COLOR_MAGENTA, -1)
except Exception:
if args.theme_white:
curses.init_pair(9, curses.COLOR_BLACK, -1)
else:
curses.init_pair(9, curses.COLOR_WHITE, -1)
try:
curses.init_pair(10, curses.COLOR_CYAN, -1)
except Exception:
if args.theme_white:
curses.init_pair(10, curses.COLOR_BLACK, -1)
else:
curses.init_pair(10, curses.COLOR_WHITE, -1)
else:
self.hascolors = False
if args.disable_bold:
A_BOLD = curses.A_BOLD
else:
A_BOLD = 0
self.title_color = A_BOLD
self.title_underline_color = A_BOLD | curses.A_UNDERLINE
self.help_color = A_BOLD
if self.hascolors:
# Colors text styles
self.no_color = curses.color_pair(1)
self.default_color = curses.color_pair(3) | A_BOLD
self.nice_color = curses.color_pair(9) | A_BOLD
self.cpu_time_color = curses.color_pair(9) | A_BOLD
self.ifCAREFUL_color = curses.color_pair(4) | A_BOLD
self.ifWARNING_color = curses.color_pair(5) | A_BOLD
self.ifCRITICAL_color = curses.color_pair(2) | A_BOLD
self.default_color2 = curses.color_pair(7) | A_BOLD
self.ifCAREFUL_color2 = curses.color_pair(8) | A_BOLD
self.ifWARNING_color2 = curses.color_pair(9) | A_BOLD
self.ifCRITICAL_color2 = curses.color_pair(6) | A_BOLD
self.filter_color = curses.color_pair(10) | A_BOLD
else:
# B&W text styles
self.no_color = curses.A_NORMAL
self.default_color = curses.A_NORMAL
self.nice_color = A_BOLD
self.cpu_time_color = A_BOLD
self.ifCAREFUL_color = curses.A_UNDERLINE
self.ifWARNING_color = A_BOLD
self.ifCRITICAL_color = curses.A_REVERSE
self.default_color2 = curses.A_NORMAL
self.ifCAREFUL_color2 = curses.A_UNDERLINE
self.ifWARNING_color2 = A_BOLD
self.ifCRITICAL_color2 = curses.A_REVERSE
self.filter_color = A_BOLD
# Define the colors list (hash table) for stats
self.colors_list = {
'DEFAULT': self.no_color,
'UNDERLINE': curses.A_UNDERLINE,
'BOLD': A_BOLD,
'SORT': A_BOLD,
'OK': self.default_color2,
'FILTER': self.filter_color,
'TITLE': self.title_color,
'PROCESS': self.default_color2,
'STATUS': self.default_color2,
'NICE': self.nice_color,
'CPU_TIME': self.cpu_time_color,
'CAREFUL': self.ifCAREFUL_color2,
'WARNING': self.ifWARNING_color2,
'CRITICAL': self.ifCRITICAL_color2,
'OK_LOG': self.default_color,
'CAREFUL_LOG': self.ifCAREFUL_color,
'WARNING_LOG': self.ifWARNING_color,
'CRITICAL_LOG': self.ifCRITICAL_color
}
# Init main window
self.term_window = self.screen.subwin(0, 0)
# Init refresh time
self.__refresh_time = args.time
# Init edit filter tag
self.edit_filter = False
# Catch key pressed with non blocking mode
self.term_window.keypad(1)
self.term_window.nodelay(1)
self.pressedkey = -1
# History tag
self.reset_history_tag = False
self.history_tag = False
if args.enable_history:
logger.info('Stats history enabled with output path %s' %
args.path_history)
from glances.exports.glances_history import GlancesHistory
self.glances_history = GlancesHistory(args.path_history)
if not self.glances_history.graph_enabled():
args.enable_history = False
logger.error(
'Stats history disabled because MatPlotLib is not installed')
def set_cursor(self, value):
"""Configure the curse cursor apparence.
0: invisible
1: visible
2: very visible
"""
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(value)
except Exception:
pass
def get_key(self, window):
# Catch ESC key AND numlock key (issue #163)
keycode = [0, 0]
keycode[0] = window.getch()
keycode[1] = window.getch()
if keycode != [-1, -1]:
logger.debug("Keypressed (code: %s)" % keycode)
if keycode[0] == 27 and keycode[1] != -1:
# Do not escape on specials keys
return -1
else:
return keycode[0]
    def __catch_key(self, return_to_browser=False):
        """Read one key press and apply the matching action.

        return_to_browser: when True, ESC/'q' returns control to the
        server browser instead of exiting the process.
        Return the pressed key code (-1 when no key was pressed).
        """
        # Catch the pressed key
        self.pressedkey = self.get_key(self.term_window)

        # Actions...
        if self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
            # 'ESC'|'q' > Quit
            if return_to_browser:
                logger.info("Stop Glances client and return to the browser")
            else:
                self.end()
                logger.info("Stop Glances")
                sys.exit(0)
        elif self.pressedkey == 10:
            # 'ENTER' > Edit the process filter
            self.edit_filter = not self.edit_filter
        elif self.pressedkey == ord('1'):
            # '1' > Switch between CPU and PerCPU information
            self.args.percpu = not self.args.percpu
        elif self.pressedkey == ord('2'):
            # '2' > Enable/disable left sidebar
            self.args.disable_left_sidebar = not self.args.disable_left_sidebar
        elif self.pressedkey == ord('3'):
            # '3' > Enable/disable quicklook
            self.args.disable_quicklook = not self.args.disable_quicklook
        elif self.pressedkey == ord('/'):
            # '/' > Switch between short/long name for processes
            self.args.process_short_name = not self.args.process_short_name
        elif self.pressedkey == ord('a'):
            # 'a' > Sort processes automatically and reset to 'cpu_percent'
            glances_processes.auto_sort = True
            glances_processes.sort_key = 'cpu_percent'
        elif self.pressedkey == ord('b'):
            # 'b' > Switch between bit/s and Byte/s for network IO
            # self.net_byteps_tag = not self.net_byteps_tag
            self.args.byte = not self.args.byte
        elif self.pressedkey == ord('c'):
            # 'c' > Sort processes by CPU usage
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'cpu_percent'
        elif self.pressedkey == ord('d'):
            # 'd' > Show/hide disk I/O stats
            self.args.disable_diskio = not self.args.disable_diskio
        elif self.pressedkey == ord('D'):
            # 'D' > Show/hide Docker stats
            self.args.disable_docker = not self.args.disable_docker
        elif self.pressedkey == ord('e'):
            # 'e' > Enable/Disable extended stats for top process
            self.args.enable_process_extended = not self.args.enable_process_extended
            if not self.args.enable_process_extended:
                glances_processes.disable_extended()
            else:
                glances_processes.enable_extended()
        elif self.pressedkey == ord('F'):
            # 'F' > Switch between FS available and free space
            self.args.fs_free_space = not self.args.fs_free_space
        elif self.pressedkey == ord('f'):
            # 'f' > Show/hide fs stats
            self.args.disable_fs = not self.args.disable_fs
        elif self.pressedkey == ord('g'):
            # 'g' > History
            self.history_tag = not self.history_tag
        elif self.pressedkey == ord('h'):
            # 'h' > Show/hide help
            self.args.help_tag = not self.args.help_tag
        elif self.pressedkey == ord('i'):
            # 'i' > Sort processes by IO rate (not available on OS X)
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'io_counters'
        elif self.pressedkey == ord('I'):
            # 'I' > Show/hide IP module
            self.args.disable_ip = not self.args.disable_ip
        elif self.pressedkey == ord('l'):
            # 'l' > Show/hide log messages
            self.args.disable_log = not self.args.disable_log
        elif self.pressedkey == ord('m'):
            # 'm' > Sort processes by MEM usage
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'memory_percent'
        elif self.pressedkey == ord('n'):
            # 'n' > Show/hide network stats
            self.args.disable_network = not self.args.disable_network
        elif self.pressedkey == ord('p'):
            # 'p' > Sort processes by name
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'name'
        elif self.pressedkey == ord('r'):
            # 'r' > Reset history
            self.reset_history_tag = not self.reset_history_tag
        elif self.pressedkey == ord('R'):
            # 'R' > Hide RAID plugins
            self.args.disable_raid = not self.args.disable_raid
        elif self.pressedkey == ord('s'):
            # 's' > Show/hide sensors stats (Linux-only)
            self.args.disable_sensors = not self.args.disable_sensors
        elif self.pressedkey == ord('t'):
            # 't' > Sort processes by TIME usage
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'cpu_times'
        elif self.pressedkey == ord('T'):
            # 'T' > View network traffic as sum Rx+Tx
            self.args.network_sum = not self.args.network_sum
        elif self.pressedkey == ord('u'):
            # 'u' > Sort processes by USER
            glances_processes.auto_sort = False
            glances_processes.sort_key = 'username'
        elif self.pressedkey == ord('U'):
            # 'U' > View cumulative network I/O (instead of bitrate)
            self.args.network_cumul = not self.args.network_cumul
        elif self.pressedkey == ord('w'):
            # 'w' > Delete finished warning logs
            glances_logs.clean()
        elif self.pressedkey == ord('x'):
            # 'x' > Delete finished warning and critical logs
            glances_logs.clean(critical=True)
        elif self.pressedkey == ord('z'):
            # 'z' > Enable/Disable processes stats (count + list + monitor)
            # Enable/Disable display
            self.args.disable_process = not self.args.disable_process
            # Enable/Disable update
            if self.args.disable_process:
                glances_processes.disable()
            else:
                glances_processes.enable()
        # Return the key code
        return self.pressedkey
def end(self):
"""Shutdown the curses window."""
if hasattr(curses, 'echo'):
curses.echo()
if hasattr(curses, 'nocbreak'):
curses.nocbreak()
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(1)
except Exception:
pass
curses.endwin()
def init_line_column(self):
"""Init the line and column position for the curses inteface."""
self.init_line()
self.init_column()
def init_line(self):
"""Init the line position for the curses inteface."""
self.line = 0
self.next_line = 0
def init_column(self):
"""Init the column position for the curses inteface."""
self.column = 0
self.next_column = 0
def new_line(self):
"""New line in the curses interface."""
self.line = self.next_line
def new_column(self):
"""New column in the curses interface."""
self.column = self.next_column
    def display(self, stats, cs_status=None):
        """Display stats on the screen.

        stats: Stats database to display
        cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to a Glances server
            "SNMP": Client is connected to a SNMP server
            "Disconnected": Client is disconnected from the server

        Return:
            True if the stats have been displayed
            False if the help have been displayed
        """
        # Init the internal line/column for Glances Curses
        self.init_line_column()

        # Get the screen size
        screen_x = self.screen.getmaxyx()[1]
        screen_y = self.screen.getmaxyx()[0]

        # No processes list in SNMP mode
        if cs_status == 'SNMP':
            # so... more space for others plugins
            plugin_max_width = 43
        else:
            plugin_max_width = None

        # Update the stats messages
        ###########################

        # Update the client server status
        self.args.cs_status = cs_status
        stats_system = stats.get_plugin(
            'system').get_stats_display(args=self.args)
        stats_uptime = stats.get_plugin('uptime').get_stats_display()
        if self.args.percpu:
            stats_percpu = stats.get_plugin('percpu').get_stats_display()
        else:
            stats_cpu = stats.get_plugin('cpu').get_stats_display()
        stats_load = stats.get_plugin('load').get_stats_display()
        stats_mem = stats.get_plugin('mem').get_stats_display()
        stats_memswap = stats.get_plugin('memswap').get_stats_display()
        stats_network = stats.get_plugin('network').get_stats_display(
            args=self.args, max_width=plugin_max_width)
        # The IP plugin may be absent (older server): treat as missing
        try:
            stats_ip = stats.get_plugin('ip').get_stats_display(args=self.args)
        except AttributeError:
            stats_ip = None
        stats_diskio = stats.get_plugin(
            'diskio').get_stats_display(args=self.args)
        stats_fs = stats.get_plugin('fs').get_stats_display(
            args=self.args, max_width=plugin_max_width)
        stats_raid = stats.get_plugin('raid').get_stats_display(
            args=self.args)
        stats_sensors = stats.get_plugin(
            'sensors').get_stats_display(args=self.args)
        stats_now = stats.get_plugin('now').get_stats_display()
        stats_docker = stats.get_plugin('docker').get_stats_display(
            args=self.args)
        stats_processcount = stats.get_plugin(
            'processcount').get_stats_display(args=self.args)
        stats_monitor = stats.get_plugin(
            'monitor').get_stats_display(args=self.args)
        stats_alert = stats.get_plugin(
            'alert').get_stats_display(args=self.args)

        # Adapt number of processes to the available space
        max_processes_displayed = screen_y - 11 - \
            self.get_stats_display_height(stats_alert) - \
            self.get_stats_display_height(stats_docker)
        try:
            if self.args.enable_process_extended and not self.args.process_tree:
                max_processes_displayed -= 4
        except AttributeError:
            pass
        if max_processes_displayed < 0:
            max_processes_displayed = 0
        if (glances_processes.max_processes is None or
                glances_processes.max_processes != max_processes_displayed):
            logger.debug("Set number of displayed processes to {0}".format(max_processes_displayed))
            glances_processes.max_processes = max_processes_displayed

        stats_processlist = stats.get_plugin(
            'processlist').get_stats_display(args=self.args)

        # Display the stats on the curses interface
        ###########################################

        # Help screen (on top of the other stats)
        if self.args.help_tag:
            # Display the stats...
            self.display_plugin(
                stats.get_plugin('help').get_stats_display(args=self.args))
            # ... and exit
            return False

        # ==================================
        # Display first line (system+uptime)
        # ==================================
        # Space between column
        self.space_between_column = 0
        self.new_line()
        l_uptime = self.get_stats_display_width(
            stats_system) + self.space_between_column + self.get_stats_display_width(stats_ip) + 3 + self.get_stats_display_width(stats_uptime)
        self.display_plugin(
            stats_system, display_optional=(screen_x >= l_uptime))
        self.new_column()
        self.display_plugin(stats_ip)
        # Space between column
        self.space_between_column = 3
        self.new_column()
        self.display_plugin(stats_uptime)

        # ========================================================
        # Display second line (<SUMMARY>+CPU|PERCPU+LOAD+MEM+SWAP)
        # ========================================================
        self.init_column()
        self.new_line()

        # Init quicklook
        stats_quicklook = {'msgdict': []}

        # Start with the mandatory stats:
        # CPU | PERCPU
        if self.args.percpu:
            cpu_width = self.get_stats_display_width(stats_percpu)
            quicklook_adapt = 114
        else:
            cpu_width = self.get_stats_display_width(
                stats_cpu, without_option=(screen_x < 80))
            quicklook_adapt = 108
        l = cpu_width
        # MEM & SWAP & LOAD
        l += self.get_stats_display_width(stats_mem,
                                          without_option=(screen_x < 100))
        l += self.get_stats_display_width(stats_memswap)
        l += self.get_stats_display_width(stats_load)

        # Quicklook plugin size is dynamic
        l_ql = 0
        if screen_x > 126 and not self.args.disable_quicklook:
            # Limit the size to be align with the process
            quicklook_width = min(screen_x - quicklook_adapt, 87)
            try:
                stats_quicklook = stats.get_plugin(
                    'quicklook').get_stats_display(max_width=quicklook_width, args=self.args)
            except AttributeError as e:
                logger.debug("Quicklook plugin not available (%s)" % e)
            else:
                l_ql = self.get_stats_display_width(stats_quicklook)

        # Display Quicklook
        self.display_plugin(stats_quicklook)
        self.new_column()

        # Compute space between column
        space_number = int(stats_quicklook['msgdict'] != [])
        space_number += int(stats_mem['msgdict'] != [])
        space_number += int(stats_memswap['msgdict'] != [])
        space_number += int(stats_load['msgdict'] != [])
        if space_number < 1:
            space_number = 1
        if screen_x > (space_number * self.space_between_column + l):
            self.space_between_column = int((screen_x - l_ql - l) / space_number)

        # Display others stats
        if self.args.percpu:
            self.display_plugin(stats_percpu)
        else:
            self.display_plugin(stats_cpu, display_optional=(screen_x >= 80))
        self.new_column()
        self.display_plugin(stats_mem, display_optional=(screen_x >= 100))
        self.new_column()
        self.display_plugin(stats_memswap)
        self.new_column()
        self.display_plugin(stats_load)

        # Space between column
        self.space_between_column = 3

        # Backup line position
        self.saved_line = self.next_line

        # ==================================================================
        # Display left sidebar (NETWORK+DISKIO+FS+SENSORS+Current time)
        # ==================================================================
        self.init_column()
        if not (self.args.disable_network and self.args.disable_diskio and
                self.args.disable_fs and self.args.disable_raid and
                self.args.disable_sensors) and not self.args.disable_left_sidebar:
            self.new_line()
            self.display_plugin(stats_network)
            self.new_line()
            self.display_plugin(stats_diskio)
            self.new_line()
            self.display_plugin(stats_fs)
            self.new_line()
            self.display_plugin(stats_raid)
            self.new_line()
            self.display_plugin(stats_sensors)
            self.new_line()
            self.display_plugin(stats_now)

        # ====================================
        # Display right stats (process and co)
        # ====================================
        # If space available...
        if screen_x > 52:
            # Restore line position
            self.next_line = self.saved_line

            # Display right sidebar
            # ((DOCKER)+PROCESS_COUNT+(MONITORED)+PROCESS_LIST+ALERT)
            self.new_column()
            self.new_line()
            self.display_plugin(stats_docker)
            self.new_line()
            self.display_plugin(stats_processcount)
            if glances_processes.process_filter is None and cs_status is None:
                # Do not display stats monitor list if a filter exist
                self.new_line()
                self.display_plugin(stats_monitor)
            self.new_line()
            self.display_plugin(stats_processlist,
                                display_optional=(screen_x > 102),
                                display_additional=(not is_mac),
                                max_y=(screen_y - self.get_stats_display_height(stats_alert) - 2))
            self.new_line()
            self.display_plugin(stats_alert)

        # History option
        # Generate history graph
        if self.history_tag and self.args.enable_history:
            self.display_popup(
                'Generate graphs history in {0}\nPlease wait...'.format(
                    self.glances_history.get_output_folder()))
            self.display_popup(
                'Generate graphs history in {0}\nDone: {1} graphs generated'.format(
                    self.glances_history.get_output_folder(),
                    self.glances_history.generate_graph(stats)))
        elif self.reset_history_tag and self.args.enable_history:
            self.display_popup('Reset history')
            self.glances_history.reset(stats)
        elif (self.history_tag or self.reset_history_tag) and not self.args.enable_history:
            try:
                self.glances_history.graph_enabled()
            except Exception:
                self.display_popup('History disabled\nEnable it using --enable-history')
            else:
                self.display_popup('History disabled\nPlease install matplotlib')
        self.history_tag = False
        self.reset_history_tag = False

        # Display edit filter popup
        # Only in standalone mode (cs_status is None)
        if self.edit_filter and cs_status is None:
            new_filter = self.display_popup(
                'Process filter pattern: ', is_input=True,
                input_value=glances_processes.process_filter)
            glances_processes.process_filter = new_filter
        # NOTE(review): this compares against the *string* 'None', not the
        # None singleton; the branch is reached for any non-None cs_status
        # except the literal string 'None' -- looks intentional but confirm.
        elif self.edit_filter and cs_status != 'None':
            self.display_popup('Process filter only available in standalone mode')
        self.edit_filter = False

        return True
    def display_popup(self, message,
                      size_x=None, size_y=None,
                      duration=3,
                      is_input=False,
                      input_size=30,
                      input_value=None):
        """
        Display a centered popup.

        If is_input is False:
         Display a centered popup with the given message during duration seconds
         If size_x and size_y: set the popup size
         else set it automatically
         Return True if the popup could be displayed

        If is_input is True:
         Display a centered popup with the given message and a input field
         If size_x and size_y: set the popup size
         else set it automatically
         Return the input string or None if the field is empty
        """
        # Center the popup
        sentence_list = message.split('\n')
        if size_x is None:
            # Auto width: longest line plus a 2-char border on each side
            size_x = len(max(sentence_list, key=len)) + 4
            # Add space for the input field
            if is_input:
                size_x += input_size
        if size_y is None:
            size_y = len(sentence_list) + 4
        screen_x = self.screen.getmaxyx()[1]
        screen_y = self.screen.getmaxyx()[0]
        if size_x > screen_x or size_y > screen_y:
            # No size to display the popup => abord
            return False
        pos_x = int((screen_x - size_x) / 2)
        pos_y = int((screen_y - size_y) / 2)

        # Create the popup
        popup = curses.newwin(size_y, size_x, pos_y, pos_x)

        # Fill the popup
        popup.border()

        # Add the message
        for y, m in enumerate(message.split('\n')):
            popup.addnstr(2 + y, 2, m, len(m))

        if is_input and not is_windows:
            # Create a subwindow for the text field
            # NOTE: `m` here is the *last* message line from the loop above;
            # the field is placed right after it.
            subpop = popup.derwin(1, input_size, 2, 2 + len(m))
            subpop.attron(self.colors_list['FILTER'])
            # Init the field with the current value
            if input_value is not None:
                subpop.addnstr(0, 0, input_value, len(input_value))
            # Display the popup
            popup.refresh()
            subpop.refresh()
            # Create the textbox inside the subwindows
            self.set_cursor(2)
            textbox = GlancesTextbox(subpop, insert_mode=False)
            textbox.edit()
            self.set_cursor(0)
            if textbox.gather() != '':
                logger.debug(
                    "User enters the following process filter patern: %s" % textbox.gather())
                # Drop the trailing character appended by the textbox
                return textbox.gather()[:-1]
            else:
                logger.debug("User clears the process filter patern")
                return None
        else:
            # Display the popup
            popup.refresh()
            curses.napms(duration * 1000)
            return True
    def display_plugin(self, plugin_stats,
                       display_optional=True,
                       display_additional=True,
                       max_y=65535):
        """Display the plugin_stats on the screen.

        If display_optional=True display the optional stats
        If display_additional=True display additionnal stats
        max_y do not display line > max_y

        NOTE(review): returns 0 when nothing is displayed and (implicitly)
        None otherwise; callers do not appear to use the return value.
        """
        # Exit if:
        # - the plugin_stats message is empty
        # - the display tag = False
        if plugin_stats is None or not plugin_stats['msgdict'] or not plugin_stats['display']:
            # Exit
            return 0

        # Get the screen size
        screen_x = self.screen.getmaxyx()[1]
        screen_y = self.screen.getmaxyx()[0]

        # Set the upper/left position of the message
        if plugin_stats['align'] == 'right':
            # Right align (last column)
            display_x = screen_x - self.get_stats_display_width(plugin_stats)
        else:
            display_x = self.column
        if plugin_stats['align'] == 'bottom':
            # Bottom (last line)
            display_y = screen_y - self.get_stats_display_height(plugin_stats)
        else:
            display_y = self.line

        # Display
        x = display_x
        x_max = x
        y = display_y
        for m in plugin_stats['msgdict']:
            # New line
            if m['msg'].startswith('\n'):
                # Go to the next line
                y += 1
                # Return to the first column
                x = display_x
                continue
            # Do not display outside the screen
            if x < 0:
                continue
            if not m['splittable'] and (x + len(m['msg']) > screen_x):
                continue
            if y < 0 or (y + 1 > screen_y) or (y > max_y):
                break
            # If display_optional = False do not display optional stats
            if not display_optional and m['optional']:
                continue
            # If display_additional = False do not display additional stats
            if not display_additional and m['additional']:
                continue
            # Is it possible to display the stat with the current screen size
            # !!! Crach if not try/except... Why ???
            try:
                self.term_window.addnstr(y, x,
                                         m['msg'],
                                         # Do not disply outside the screen
                                         screen_x - x,
                                         self.colors_list[m['decoration']])
            except Exception:
                pass
            else:
                # New column
                try:
                    # Python 2: we need to decode to get real screen size because utf-8 special tree chars
                    # occupy several bytes
                    offset = len(m['msg'].decode("utf-8", "replace"))
                except AttributeError:
                    # Python 3: strings are strings and bytes are bytes, all is
                    # good
                    offset = len(m['msg'])
                x += offset
                if x > x_max:
                    x_max = x

        # Compute the next Glances column/line position
        self.next_column = max(
            self.next_column, x_max + self.space_between_column)
        self.next_line = max(self.next_line, y + self.space_between_line)
def erase(self):
"""Erase the content of the screen."""
self.term_window.erase()
def flush(self, stats, cs_status=None):
"""Clear and update the screen.
stats: Stats database to display
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
"""
self.erase()
self.display(stats, cs_status=cs_status)
def update(self, stats, cs_status=None, return_to_browser=False):
"""Update the screen.
Wait for __refresh_time sec / catch key every 100 ms.
INPUT
stats: Stats database to display
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
return_to_browser:
True: Do not exist, return to the browser list
False: Exit and return to the shell
OUPUT
True: Exit key has been pressed
False: Others cases...
"""
# Flush display
self.flush(stats, cs_status=cs_status)
# Wait
exitkey = False
countdown = Timer(self.__refresh_time)
while not countdown.finished() and not exitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
# Is it an exit key ?
exitkey = (pressedkey == ord('\x1b') or pressedkey == ord('q'))
if not exitkey and pressedkey > -1:
# Redraw display
self.flush(stats, cs_status=cs_status)
# Wait 100ms...
curses.napms(100)
return exitkey
def get_stats_display_width(self, curse_msg, without_option=False):
"""Return the width of the formatted curses message.
The height is defined by the maximum line.
"""
try:
if without_option:
# Size without options
c = len(max(''.join([(re.sub(r'[^\x00-\x7F]+', ' ', i['msg']) if not i['optional'] else "")
for i in curse_msg['msgdict']]).split('\n'), key=len))
else:
# Size with all options
c = len(max(''.join([re.sub(r'[^\x00-\x7F]+', ' ', i['msg'])
for i in curse_msg['msgdict']]).split('\n'), key=len))
except Exception:
return 0
else:
return c
def get_stats_display_height(self, curse_msg):
r"""Return the height of the formatted curses message.
The height is defined by the number of '\n' (new line).
"""
try:
c = [i['msg'] for i in curse_msg['msgdict']].count('\n')
except Exception:
return 0
else:
return c + 1
# Standalone mode reuses the base curses interface unchanged.
class GlancesCursesStandalone(_GlancesCurses):

    """Class for the Glances curse standalone."""

    pass
# Client mode reuses the base curses interface unchanged.
class GlancesCursesClient(_GlancesCurses):

    """Class for the Glances curse client."""

    pass
class GlancesCursesBrowser(_GlancesCurses):
"""Class for the Glances curse client browser."""
    def __init__(self, args=None):
        """Init the browser screen on top of the base curses interface."""
        # Init the father class
        _GlancesCurses.__init__(self, args=args)

        # Extra colors for the per-server status column
        _colors_list = {
            'UNKNOWN': self.no_color,
            'SNMP': self.default_color2,
            'ONLINE': self.default_color2,
            'OFFLINE': self.ifCRITICAL_color2,
            'PROTECTED': self.ifWARNING_color2,
        }
        self.colors_list.update(_colors_list)

        # First time scan tag
        # Used to display a specific message when the browser is started
        self.first_scan = True

        # Init refresh time
        # (name-mangled per class, so this does not clash with the parent's
        # private __refresh_time)
        self.__refresh_time = args.time

        # Init the cursor position for the client browser
        self.cursor_position = 0

        # Active Glances server number
        self._active_server = None
    @property
    def active_server(self):
        """Return the active server or None if it's the browser list."""
        return self._active_server

    @active_server.setter
    def active_server(self, index):
        """Set the active server or None if no server selected."""
        self._active_server = index
    @property
    def cursor(self):
        """Get the cursor position."""
        return self.cursor_position

    @cursor.setter
    def cursor(self, position):
        """Set the cursor position."""
        self.cursor_position = position
def cursor_up(self, servers_list):
"""Set the cursor to position N-1 in the list."""
if self.cursor_position > 0:
self.cursor_position -= 1
else:
self.cursor_position = len(servers_list) - 1
def cursor_down(self, servers_list):
"""Set the cursor to position N-1 in the list."""
if self.cursor_position < len(servers_list) - 1:
self.cursor_position += 1
else:
self.cursor_position = 0
def __catch_key(self, servers_list):
# Catch the browser pressed key
self.pressedkey = self.get_key(self.term_window)
# Actions...
if self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
# 'ESC'|'q' > Quit
self.end()
logger.info("Stop Glances client browser")
sys.exit(0)
elif self.pressedkey == 10:
# 'ENTER' > Run Glances on the selected server
logger.debug("Server number {0} selected".format(self.cursor + 1))
self.active_server = self.cursor
elif self.pressedkey == 259:
# 'UP' > Up in the server list
self.cursor_up(servers_list)
elif self.pressedkey == 258:
# 'DOWN' > Down in the server list
self.cursor_down(servers_list)
# Return the key code
return self.pressedkey
def update(self, servers_list):
"""Update the servers' list screen.
Wait for __refresh_time sec / catch key every 100 ms.
servers_list: Dict of dict with servers stats
"""
# Flush display
logger.debug("Servers list: {}".format(servers_list))
self.flush(servers_list)
# Wait
exitkey = False
countdown = Timer(self.__refresh_time)
while not countdown.finished() and not exitkey:
# Getkey
pressedkey = self.__catch_key(servers_list)
# Is it an exit or select server key ?
exitkey = (
pressedkey == ord('\x1b') or pressedkey == ord('q') or pressedkey == 10)
if not exitkey and pressedkey > -1:
# Redraw display
self.flush(servers_list)
# Wait 100ms...
curses.napms(100)
return self.active_server
    def flush(self, servers_list):
        """Erase and fully redraw the servers' list screen.

        servers_list: List of dict with servers stats
        """
        # Clear the screen first so stale rows do not linger after a redraw
        self.erase()
        self.display(servers_list)
    def display(self, servers_list):
        """Display the servers list.

        Draw a header line, a column-title row, then one row per server,
        with a '>' cursor in front of the selected row.

        Return:
            True if the stats have been displayed
            False if the stats have not been displayed (no server available)
        """
        # Init the internal line/column for Glances Curses
        self.init_line_column()
        # Get the current screen size
        screen_x = self.screen.getmaxyx()[1]
        screen_y = self.screen.getmaxyx()[0]
        # Init position
        x = 0
        y = 0
        # Display top header
        if len(servers_list) == 0:
            if self.first_scan and not self.args.disable_autodiscover:
                msg = 'Glances is scanning your network. Please wait...'
                self.first_scan = False
            else:
                msg = 'No Glances server available'
        elif len(servers_list) == 1:
            msg = 'One Glances server available'
        else:
            msg = '{0} Glances servers available'.format(len(servers_list))
        if self.args.disable_autodiscover:
            msg += ' ' + '(auto discover is disabled)'
        self.term_window.addnstr(y, x,
                                 msg,
                                 screen_x - x,
                                 self.colors_list['TITLE'])
        if len(servers_list) == 0:
            return False
        # Display the Glances server list
        # ================================
        # Table of table
        # Item description: [stats_id, column name, column size]
        # A None column name means the column is not rendered (the alias
        # entry only feeds the 'name' substitution below).
        column_def = [
            ['name', 'Name', 16],
            ['alias', None, None],
            ['load_min5', 'LOAD', 6],
            ['cpu_percent', 'CPU%', 5],
            ['mem_percent', 'MEM%', 5],
            ['status', 'STATUS', 8],
            ['ip', 'IP', 15],
            # ['port', 'PORT', 5],
            ['hr_name', 'OS', 16],
        ]
        y = 2
        # Display table header
        xc = x + 2
        for cpt, c in enumerate(column_def):
            if xc < screen_x and y < screen_y and c[1] is not None:
                self.term_window.addnstr(y, xc,
                                         c[1],
                                         screen_x - x,
                                         self.colors_list['BOLD'])
                xc += c[2] + self.space_between_column
        y += 1
        # If a server has been deleted from the list...
        # ... and if the cursor is in the latest position
        if self.cursor > len(servers_list) - 1:
            # Set the cursor position to the latest item
            self.cursor = len(servers_list) - 1
        # Display table
        line = 0
        for v in servers_list:
            # Get server stats
            server_stat = {}
            for c in column_def:
                try:
                    server_stat[c[0]] = v[c[0]]
                except KeyError as e:
                    logger.debug(
                        "Cannot grab stats {0} from server (KeyError: {1})".format(c[0], e))
                    server_stat[c[0]] = '?'
                # Display alias instead of name
                try:
                    if c[0] == 'alias' and v[c[0]] is not None:
                        server_stat['name'] = v[c[0]]
                except KeyError:
                    pass
            # Display line for server stats
            cpt = 0
            xc = x
            # Is the line selected ?
            if line == self.cursor:
                # Display cursor
                self.term_window.addnstr(
                    y, xc, ">", screen_x - xc, self.colors_list['BOLD'])
            # Display the line
            xc += 2
            for c in column_def:
                if xc < screen_x and y < screen_y and c[1] is not None:
                    # Display server stats; row color follows server status
                    self.term_window.addnstr(
                        y, xc, format(server_stat[c[0]]), c[2], self.colors_list[v['status']])
                    xc += c[2] + self.space_between_column
                cpt += 1
            # Next line, next server...
            y += 1
            line += 1
        return True
if not is_windows:
class GlancesTextbox(Textbox):
def __init__(*args, **kwargs):
Textbox.__init__(*args, **kwargs)
def do_command(self, ch):
if ch == 10: # Enter
return 0
if ch == 127: # Enter
return 8
return Textbox.do_command(self, ch)
| lgpl-3.0 |
CodeForPhilly/chime | tests/penn_chime/model/test_sir.py | 1 | 6343 | from datetime import date
import pytest
import pandas as pd
import numpy as np
from datetime import timedelta
from penn_chime.constants import EPSILON
from penn_chime.model.sir import (
sir,
sim_sir,
get_growth_rate,
Sir,
)
def test_sir():
    """Check a single sir() step against hand-computed values.

    Also pins the exact CPython TypeError messages raised when
    non-numeric arguments reach the arithmetic inside sir().
    """
    sir_test = sir(100, 1, 0, 0.2, 0.5, 1)
    assert sir_test == (
        0.7920792079207921,
        0.20297029702970298,
        0.0049504950495049506,
    ), "This contrived example should work"
    assert isinstance(sir_test, tuple)
    for v in sir_test:
        assert isinstance(v, float)
        assert v >= 0
    # Certain things should *not* work
    with pytest.raises(TypeError) as error:
        sir("S", 1, 0, 0.2, 0.5, 1)
    assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
    with pytest.raises(TypeError) as error:
        sir(100, "I", 0, 0.2, 0.5, 1)
    assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
    with pytest.raises(TypeError) as error:
        sir(100, 1, "R", 0.2, 0.5, 1)
    assert str(error.value) == "unsupported operand type(s) for +: 'float' and 'str'"
    with pytest.raises(TypeError) as error:
        sir(100, 1, 0, "beta", 0.5, 1)
    assert str(error.value) == "bad operand type for unary -: 'str'"
    with pytest.raises(TypeError) as error:
        sir(100, 1, 0, 0.2, "gamma", 1)
    assert str(error.value) == "unsupported operand type(s) for -: 'float' and 'str'"
    with pytest.raises(TypeError) as error:
        sir(100, 1, 0, 0.2, 0.5, "N")
    assert str(error.value) == "unsupported operand type(s) for /: 'str' and 'float'"
    # Zeros across the board should fail
    with pytest.raises(ZeroDivisionError):
        sir(0, 0, 0, 0, 0, 0)
def test_sim_sir():
    """Spot-check sim_sir() endpoints.

    Rounding to move fast past decimal place issues.
    """
    raw = sim_sir(
        5, 6, 7, 0.1, 0, [(0.1, 40)],  # s # i # r # gamma # i_day # beta1 # n_days1
    )
    # First entries echo the initial conditions
    assert round(raw["susceptible"][0], 0) == 5
    assert round(raw["infected"][0], 2) == 6
    assert round(raw["recovered"][0], 0) == 7
    # Final entries: epidemic has burned through the susceptibles
    assert round(raw["susceptible"][-1], 2) == 0
    assert round(raw["infected"][-1], 2) == 0.18
    assert round(raw["recovered"][-1], 2) == 17.82
def test_growth_rate():
    """Daily growth rate for positive, zero, and negative doubling times."""
    cases = [(5, 14.8698), (0, 0.0), (-4, -15.9104)]
    for doubling_time, expected_pct in cases:
        pct = get_growth_rate(doubling_time) * 100.0
        assert np.round(pct, decimals=4) == expected_pct
def test_model(model, param):
    # test the Model — `model` and `param` are pytest fixtures (presumably
    # defined in conftest.py; the constants below pin that calibration)
    assert round(model.infected, 0) == 45810.0
    assert isinstance(model.infected, float)  # based off note in models.py
    # test the class-calculated attributes
    # we're talking about getting rid of detection probability
    # assert model.detection_probability == 0.125
    assert model.intrinsic_growth_rate == 0.12246204830937302
    assert abs(model.beta - 4.21501347256401e-07) < EPSILON
    assert model.r_t == 2.307298374881539
    assert model.r_naught == 2.7144686763312222
    assert model.doubling_time_t == 7.764405988534983
    assert model.i_day == 43
def test_model_first_hosp_fit(param):
    """Fitting from date_first_hospitalized (doubling_time=None) should
    reproduce the doubling-time fit to within 1% on each derived rate."""
    param.date_first_hospitalized = param.current_date - timedelta(days=43)
    param.doubling_time = None
    my_model = Sir(param)
    assert abs(my_model.intrinsic_growth_rate - 0.123) / 0.123 < 0.01
    assert abs(my_model.beta - 4.21501347256401e-07) < EPSILON
    assert abs(my_model.r_t - 2.32) / 2.32 < 0.01
    assert abs(my_model.r_naught - 2.72) / 2.72 < 0.01
    assert abs(my_model.doubling_time_t - 7.71)/7.71 < 0.01
def test_model_raw_start(model, param):
    """Check the head of raw_df and the first/middle dispositions rows."""
    raw_df = model.raw_df
    # test the things n_days creates, which in turn tests sim_sir, sir, and get_dispositions
    # print('n_days: %s; i_day: %s' % (param.n_days, model.i_day))
    assert len(raw_df) == (len(np.arange(-model.i_day, param.n_days + 1))) == 104
    first = raw_df.iloc[0, :]
    second = raw_df.iloc[1, :]
    assert first.susceptible == 499600.0
    assert round(second.infected, 0) == 449.0
    assert list(model.dispositions_df.loc[0, [
        "day",
        "date",
        "ever_hospitalized",
        "ever_icu",
        "ever_ventilated",
    ]]) == [
        -43,
        date(year=2020, month=2, day=14),
        1.0,
        0.4,
        0.2,
    ]
    assert round(raw_df.recovered[30], 0) == 7083.0
    d, dt, hosp, icu, vent = list(model.dispositions_df.loc[60, [
        "day",
        "date",
        "ever_hospitalized",
        "ever_icu",
        "ever_ventilated",
    ]])
    assert dt == date(year=2020, month=4, day=14)
    assert [round(v, 0) for v in (d, hosp, icu, vent)] == [17, 549.0, 220.0, 110.0]
def test_model_conservation(param, model):
    """S, I, R stay non-negative, sum to the population, and never exceed it."""
    raw_df = model.raw_df
    compartments = (raw_df.susceptible, raw_df.infected, raw_df.recovered)
    for series in compartments:
        assert (0.0 <= series).all()
    drift = sum(compartments) - param.population
    assert (drift < 0.1).all()
    for series in compartments:
        assert (series <= param.population).all()
def test_model_raw_end(param, model):
    """The final susceptible count is pinned to the reference calibration."""
    final_row = model.raw_df.iloc[-1, :]
    assert round(final_row.susceptible, 0) == 83391.0
def test_model_monotonicity(param, model):
    """Susceptible must never grow; recovered must never shrink."""
    raw_df = model.raw_df
    susceptible_steps = raw_df.susceptible[1:] - raw_df.susceptible.shift(1)[1:]
    recovered_steps = raw_df.recovered[1:] - raw_df.recovered.shift(1)[1:]
    assert (susceptible_steps <= 0).all()
    assert (recovered_steps >= 0).all()
def test_model_cumulative_census(param, model):
    """Cumulative admissions must track market share x rate x (I + R)."""
    # test that census is being properly calculated
    raw_df = model.raw_df
    admits_df = model.admits_df
    df = pd.DataFrame(
        {
            "hospitalized": admits_df.admits_hospitalized,
            "icu": admits_df.admits_icu,
            "ventilated": admits_df.admits_ventilated,
        }
    )
    admits = df.cumsum()
    # 1.0 is for the one hospital patient on the first day, who won't appear in the admissions
    diff = admits.hospitalized[1:-1] - (
        param.market_share * param.hospitalized.rate * (raw_df.infected[1:-1] + raw_df.recovered[1:-1]) - 1.0
    )
    assert (diff.abs() < 0.1).all()
| mit |
jstoxrocky/statsmodels | statsmodels/tsa/arima_process.py | 26 | 30878 | '''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practise of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptoticaly (as T-> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
                         burnin=0):
    """
    Generate a random sample of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag.
        Note the signal-processing sign convention: the AR coefficients
        enter with the opposite sign of the usual statistics notation.
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nsample : int
        length of the simulated time series
    sigma : float
        standard deviation of the innovation noise
    distrvs : function, random number generator
        callable that takes the required sample size as its argument and
        returns the innovations; default np.random.randn
    burnin : integer (default: 0)
        number of leading observations generated and then discarded, to
        reduce the influence of the (implicitly zero) initial conditions

    Returns
    -------
    sample : array
        sample of the ARMA process given by ar, ma, of length nsample

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
    """
    # Draw nsample + burnin innovations, scale, run them through the ARMA
    # filter, and finally drop the burn-in segment.
    innovations = distrvs(nsample + burnin) * sigma
    simulated = signal.lfilter(ma, ar, innovations)
    return simulated[burnin:]
def arma_acovf(ar, ma, nobs=10):
    '''theoretical autocovariance function of ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf

    Returns
    -------
    acovf : array
        autocovariance of ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    Notes
    -----
    Tries to do some crude numerical speed improvements for cases
    with high persistance. However, this algorithm is slow if the process is
    highly persistent and only a few autocovariances are desired.
    '''
    #increase length of impulse response for AR closer to 1
    #maybe cheap/fast enough to always keep nobs for ir large
    if np.abs(np.sum(ar)-1) > 0.9:
        nobs_ir = max(1000, 2 * nobs)  # no idea right now how large is needed
    else:
        nobs_ir = max(100, 2 * nobs)  # no idea right now
    ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #better safe than sorry (?), I have no idea about the required precision
    #only checked for AR(1)
    # NOTE(review): this loop only extends while the *last* coefficient is
    # positive; an oscillating impulse response whose tail ends on a
    # negative value stops immediately — abs(ir[-1]) looks intended; confirm
    # before changing.
    while ir[-1] > 5*1e-5:
        nobs_ir *= 10
        ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #again no idea where the speed break points are:
    if nobs_ir > 50000 and nobs < 1001:
        # direct dot products over the truncated IR are cheaper here than a
        # full-length correlation
        acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs])
                          for t in range(nobs)])
    else:
        acovf = np.correlate(ir, ir, 'full')[len(ir)-1:]
    return acovf[:nobs]
def arma_acf(ar, ma, nobs=10):
    '''theoretical autocorrelation function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acf

    Returns
    -------
    acf : array
        autocorrelation of ARMA process given by ar, ma

    See Also
    --------
    arma_acovf
    acf
    acovf
    '''
    # Autocorrelation is the autocovariance normalized by the lag-0 variance.
    autocov = arma_acovf(ar, ma, nobs)
    return autocov / autocov[0]
def arma_pacf(ar, ma, nobs=10):
    '''partial autocorrelation function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned pacf

    Returns
    -------
    pacf : array
        partial autocorrelation of ARMA process given by ar, ma

    Notes
    -----
    solves yule-walker equation for each lag order up to nobs lags

    not tested/checked yet
    '''
    apacf = np.zeros(nobs)
    # NOTE: despite the name, `acov` holds *autocorrelations* (arma_acf);
    # the Yule-Walker solution is invariant to that common scaling, so the
    # result is the same as with autocovariances.
    acov = arma_acf(ar, ma, nobs=nobs+1)
    apacf[0] = 1.
    for k in range(2, nobs+1):
        # Order k-1 Yule-Walker system: Toeplitz(r[:-1]) @ phi = r[1:];
        # the last coefficient of phi is the partial autocorrelation.
        r = acov[:k]
        apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
    return apacf
def arma_periodogram(ar, ma, worN=None, whole=0):
    '''periodogram for ARMA process given by lag-polynomials ar and ma

    Parameters
    ----------
    ar : array_like
        autoregressive lag-polynomial with leading 1 and lhs sign
    ma : array_like
        moving average lag-polynomial with leading 1
    worN : {None, int}, optional
        option for scipy.signal.freqz (read "w or N")
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, the compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN
    whole : {0,1}, optional
        options for scipy.signal.freqz
        Normally, frequencies are computed from 0 to pi (upper-half of
        unit-circle. If whole is non-zero compute frequencies from 0 to 2*pi.

    Returns
    -------
    w : array
        frequencies
    sd : array
        periodogram, spectral density

    Notes
    -----
    Normalization ?

    This uses signal.freqz, which does not use fft. There is a fft version
    somewhere.
    '''
    frequencies, response = signal.freqz(ma, ar, worN=worN, whole=whole)
    spectral_density = np.abs(response) ** 2 / np.sqrt(2 * np.pi)
    if np.isnan(response).sum() > 0:
        # this happens with unit root or seasonal unit root'
        print('Warning: nan in frequency response h, maybe a unit root')
    return frequencies, spectral_density
def arma_impulse_response(ar, ma, nobs=100):
    '''get the impulse response function (MA representation) for ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        auto regressive lag polynomial
    ma : array_like, 1d
        moving average lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ir : array, 1d
        impulse response function with nobs elements

    Notes
    -----
    This is the same as finding the MA representation of an ARMA(p,q).
    By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e

    ma_representation = arma_impulse_response(ar, ma, nobs=100)
    ar_representation = arma_impulse_response(ma, ar, nobs=100)

    fully tested against matlab

    Examples
    --------
    AR(1)

    >>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])
    '''
    # Filtering a unit impulse through the ARMA filter yields the first
    # nobs coefficients of the MA(inf) representation.
    unit_impulse = np.zeros(nobs)
    unit_impulse[0] = 1.
    return signal.lfilter(ma, ar, unit_impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
    '''get the AR representation of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        auto regressive lag polynomial
    ma : array_like, 1d
        moving average lag polynomial
    nobs : int
        number of observations to calculate

    Returns
    -------
    ar : array, 1d
        coefficients of AR lag polynomial with nobs elements

    Notes
    -----
    This is just an alias for
    ``ar_representation = arma_impulse_response(ma, ar, nobs=100)``

    fully tested against matlab
    '''
    # The AR(inf) representation is the impulse response of the process
    # with the roles of the two lag polynomials swapped.
    return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
    '''find arma approximation to ar process

    This finds the ARMA(p,q) coefficients that minimize the integrated
    squared difference between the impulse_response functions
    (MA representation) of the AR and the ARMA process. This does
    currently not check whether the MA lagpolynomial of the ARMA
    process is invertible, neither does it check the roots of the AR
    lagpolynomial.

    Parameters
    ----------
    ar_des : array_like
        coefficients of original AR lag polynomial, including lag zero
    p, q : int
        length of desired ARMA lag polynomials
    n : int
        number of terms of the impulse_response function to include in the
        objective function for the approximation
    mse : string, 'ar'
        not used yet
    start : array_like or None
        starting values for the optimizer; default is -0.9 for the AR part
        and 0 for the MA part

    Returns
    -------
    ar_app, ma_app : arrays
        coefficients of the AR and MA lag polynomials of the approximation
    res : tuple
        result of optimize.leastsq

    Notes
    -----
    Extension is possible if we want to match autocovariance instead
    of impulse response function.
    '''
    def msear_err(arma, ar_des):
        # Split the packed parameter vector into AR and MA polynomials
        # (both with an implicit leading 1) and compare impulse responses.
        ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
        ar_approx = arma_impulse_response(ma, ar, n)
        return (ar_des - ar_approx)  # leastsq squares and sums the residuals

    if start is None:
        arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)]
    else:
        arma0 = start
    res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
    arma_app = np.atleast_1d(res[0])
    # Bug fix: the original line ended with a stray comma, which made
    # ar_app a 1-tuple containing the array instead of the array promised
    # by the docstring.
    ar_app = np.r_[1, arma_app[:p-1]]
    ma_app = np.r_[1, arma_app[p-1:]]
    return ar_app, ma_app, res
def lpol2index(ar):
    '''remove zeros from lagpolynomial, squeezed representation with index

    Parameters
    ----------
    ar : array_like
        coefficients of lag polynomial

    Returns
    -------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lagpolynomial with non-zero elements
    '''
    poly = np.asarray(ar)
    # Lags whose coefficient is exactly zero are dropped.
    nonzero_lags = np.nonzero(poly)[0]
    return poly[nonzero_lags], nonzero_lags
def index2lpol(coeffs, index):
    '''expand non-zero coefficients to a full lag polynomial

    Parameters
    ----------
    coeffs : array
        non-zero coefficients of lag polynomial
    index : array
        index (lags) of lagpolynomial with non-zero elements

    Returns
    -------
    ar : array_like
        coefficients of lag polynomial, length ``max(index) + 1``
    '''
    n = max(index)
    # Bug fix: the array needs n + 1 slots so the highest lag n itself
    # fits; ``np.zeros(n)`` raised IndexError on assignment below.
    ar = np.zeros(n + 1)
    ar[index] = coeffs
    return ar
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
    '''MA representation of fractional integration

    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ma : array
        coefficients of lag polynomial
    '''
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # coefficient j is Gamma(d+j) / (Gamma(j+1) Gamma(d)), computed in logs
    # for numerical stability
    log_coeffs = gammaln(d + lags) - gammaln(lags + 1) - gammaln(d)
    return np.exp(log_coeffs)
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
    '''AR representation of fractional integration

    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)

    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero

    Returns
    -------
    ar : array
        coefficients of lag polynomial

    Notes
    -----
    first coefficient is 1, negative signs except for first term,
    ar(L)*x_t
    '''
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    coeffs = -np.exp(gammaln(lags - d) - gammaln(lags + 1) - gammaln(-d))
    # lag-0 coefficient of (1-L)^d is always 1
    coeffs[0] = 1
    return coeffs
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
    '''return coefficients for seasonal difference (1-L^s)

    just a trivial convenience function

    Parameters
    ----------
    s : int
        number of periods in season

    Returns
    -------
    sdiff : list, length s+1
    '''
    # 1 at lag zero, -1 at lag s, zeros in between
    interior_zeros = [0] * (s - 1)
    return [1] + interior_zeros + [-1]
def deconvolve(num, den, n=None):
    """Deconvolves divisor out of signal, division of polynomials for n terms

    calculates den^{-1} * num

    Parameters
    ----------
    num : array_like
        signal or lag polynomial
    den : array_like
        coefficients of lag polynomial (linear filter)
    n : None or int
        number of terms of quotient

    Returns
    -------
    quot : array
        quotient or filtered series
    rem : array
        remainder

    Notes
    -----
    If num is a time series, then this applies the linear filter den^{-1}.
    If both num and den are both lagpolynomials, then this calculates the
    quotient polynomial for n terms and also returns the remainder.

    This is copied from scipy.signal.signaltools and added n as optional
    parameter.
    """
    num = np.atleast_1d(num)
    den = np.atleast_1d(den)
    num_len = len(num)
    den_len = len(den)
    if n is None and den_len > num_len:
        # Denominator degree exceeds numerator degree: empty quotient,
        # numerator is the remainder.
        return [], num
    if n is None:
        n = num_len - den_len + 1
    # Dividing polynomials is filtering a unit impulse with num/den.
    impulse = np.zeros(n, float)
    impulse[0] = 1
    quot = signal.lfilter(num, den, impulse)
    num_approx = signal.convolve(den, quot, mode='full')
    if num_len < len(num_approx):  # 1d only ?
        padding = np.zeros(len(num_approx) - num_len)
        num = np.concatenate((num, padding))
    rem = num - num_approx
    return quot, rem
class ArmaProcess(object):
    """
    Represent an ARMA process for given lag-polynomials

    This is a class to bring together properties of the process.
    It does not do any estimation or statistical analysis.

    Parameters
    ----------
    ar : array_like, 1d
        Coefficient for autoregressive lag polynomial, including zero lag.
        See the notes for some information about the sign.
    ma : array_like, 1d
        Coefficient for moving-average lag polynomial, including zero lag
    nobs : int, optional
        Length of simulated time series. Used, for example, if a sample is
        generated. See example.

    Notes
    -----
    As mentioned above, both the AR and MA components should include the
    coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing used in signal.lfilter vs.
    conventions in statistics for ARMA processes, the AR paramters should
    have the opposite sign of what you might expect. See the examples below.

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> arma_process = sm.tsa.ArmaProcess(ar, ma)
    >>> arma_process.isstationary
    True
    >>> arma_process.isinvertible
    True
    >>> y = arma_process.generate_sample(250)
    >>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
    >>> model.params
    array([ 0.79044189, -0.23140636,  0.70072904,  0.40608028])
    """

    # maybe needs special handling for unit roots
    def __init__(self, ar, ma, nobs=100):
        self.ar = np.asarray(ar)
        self.ma = np.asarray(ma)
        # Coefficients in the statistics sign convention (without lag 0).
        self.arcoefs = -self.ar[1:]
        self.macoefs = self.ma[1:]
        self.arpoly = np.polynomial.Polynomial(self.ar)
        self.mapoly = np.polynomial.Polynomial(self.ma)
        self.nobs = nobs

    @classmethod
    def from_coeffs(cls, arcoefs, macoefs, nobs=100):
        """
        Create ArmaProcess instance from coefficients of the lag-polynomials

        Parameters
        ----------
        arcoefs : array-like
            Coefficient for autoregressive lag polynomial, not including zero
            lag. The sign is inverted to conform to the usual time series
            representation of an ARMA process in statistics. See the class
            docstring for more information.
        macoefs : array-like
            Coefficient for moving-average lag polynomial, including zero lag
        nobs : int, optional
            Length of simulated time series. Used, for example, if a sample
            is generated.
        """
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    @classmethod
    def from_estimation(cls, model_results, nobs=None):
        """
        Create ArmaProcess instance from ARMA estimation results

        Parameters
        ----------
        model_results : ARMAResults instance
            A fitted model
        nobs : int, optional
            If None, nobs is taken from the results
        """
        arcoefs = model_results.arparams
        macoefs = model_results.maparams
        nobs = nobs or model_results.nobs
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    def __mul__(self, oth):
        # Multiplying two processes multiplies their lag polynomials; a
        # (ar, ma) tuple of coefficient sequences is accepted as well.
        if isinstance(oth, self.__class__):
            ar = (self.arpoly * oth.arpoly).coef
            ma = (self.mapoly * oth.mapoly).coef
        else:
            try:
                aroth, maoth = oth
                arpolyoth = np.polynomial.Polynomial(aroth)
                mapolyoth = np.polynomial.Polynomial(maoth)
                ar = (self.arpoly * arpolyoth).coef
                ma = (self.mapoly * mapolyoth).coef
            except Exception:
                # narrowed from a bare except: still reports and re-raises
                print('other is not a valid type')
                raise
        return self.__class__(ar, ma, nobs=self.nobs)

    def __repr__(self):
        return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
                                                 self.ma.tolist(),
                                                 self.nobs)

    def __str__(self):
        return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
                                                self.ma.tolist())

    def acovf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acovf(self.ar, self.ma, nobs=nobs)

    acovf.__doc__ = arma_acovf.__doc__

    def acf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acf(self.ar, self.ma, nobs=nobs)

    acf.__doc__ = arma_acf.__doc__

    def pacf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_pacf(self.ar, self.ma, nobs=nobs)

    pacf.__doc__ = arma_pacf.__doc__

    def periodogram(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_periodogram(self.ar, self.ma, worN=nobs)

    periodogram.__doc__ = arma_periodogram.__doc__

    def impulse_response(self, nobs=None):
        nobs = nobs or self.nobs
        # Bug fix: arma_impulse_response takes ``nobs``, not ``worN``;
        # the original call raised TypeError.
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)

    impulse_response.__doc__ = arma_impulse_response.__doc__

    def arma2ma(self, nobs=None):
        nobs = nobs or self.nobs
        return arma2ma(self.ar, self.ma, nobs=nobs)

    arma2ma.__doc__ = arma2ma.__doc__

    def arma2ar(self, nobs=None):
        nobs = nobs or self.nobs
        return arma2ar(self.ar, self.ma, nobs=nobs)

    arma2ar.__doc__ = arma2ar.__doc__

    @property
    def arroots(self):
        """
        Roots of autoregressive lag-polynomial
        """
        return self.arpoly.roots()

    @property
    def maroots(self):
        """
        Roots of moving average lag-polynomial
        """
        return self.mapoly.roots()

    @property
    def isstationary(self):
        '''Arma process is stationary if AR roots are outside unit circle

        Returns
        -------
        isstationary : boolean
            True if autoregressive roots are outside unit circle
        '''
        if np.all(np.abs(self.arroots) > 1):
            return True
        else:
            return False

    @property
    def isinvertible(self):
        '''Arma process is invertible if MA roots are outside unit circle

        Returns
        -------
        isinvertible : boolean
            True if moving average roots are outside unit circle
        '''
        if np.all(np.abs(self.maroots) > 1):
            return True
        else:
            return False

    def invertroots(self, retnew=False):
        '''make MA polynomial invertible by inverting roots inside unit circle

        Parameters
        ----------
        retnew : boolean
            If False (default), then return the lag-polynomial as array.
            If True, then return a new instance with invertible MA-polynomial

        Returns
        -------
        manew : array
            new invertible MA lag-polynomial, returned if retnew is false.
        wasinvertible : boolean
            True if the MA lag-polynomial was already invertible, returned if
            retnew is false.
        armaprocess : new instance of class
            If retnew is true, then return a new instance with invertible
            MA-polynomial
        '''
        #TODO: variable returns like this?
        # Bug fix: ``self.ma_roots()`` does not exist; the roots are
        # exposed via the ``maroots`` property.
        pr = self.maroots
        insideroots = np.abs(pr) < 1
        if insideroots.any():
            # Mirror roots inside the unit circle to their reciprocals and
            # rebuild the polynomial, normalized to a leading 1.
            pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
            pnew = np.polynomial.Polynomial.fromroots(pr)
            mainv = pnew.coef/pnew.coef[0]
            wasinvertible = False
        else:
            mainv = self.ma
            wasinvertible = True
        if retnew:
            return self.__class__(self.ar, mainv, nobs=self.nobs)
        else:
            return mainv, wasinvertible

    def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
                        burnin=0):
        '''generate ARMA samples

        Parameters
        ----------
        nsample : int or tuple of ints
            If nsample is an integer, then this creates a 1d timeseries of
            length size. If nsample is a tuple, then the timeseries is along
            axis. All other axis have independent arma samples.
        scale : float
            standard deviation of noise
        distrvs : function, random number generator
            function that generates the random numbers, and takes sample size
            as argument
            default: np.random.randn
            TODO: change to size argument
        burnin : integer (default: 0)
            to reduce the effect of initial conditions, burnin observations
            at the beginning of the sample are dropped
        axis : int
            See nsample.

        Returns
        -------
        rvs : ndarray
            random sample(s) of arma process

        Notes
        -----
        Should work for n-dimensional with time series along axis, but not
        tested yet. Processes are sampled independently.
        '''
        if distrvs is None:
            distrvs = np.random.normal
        if np.ndim(nsample) == 0:
            nsample = [nsample]
        if burnin:
            #handle burnin time for nd arrays
            #maybe there is a better trick in scipy.fft code
            newsize = list(nsample)
            newsize[axis] += burnin
            newsize = tuple(newsize)
            fslice = [slice(None)]*len(newsize)
            fslice[axis] = slice(burnin, None, None)
            fslice = tuple(fslice)
        else:
            newsize = tuple(nsample)
            fslice = tuple([slice(None)]*np.ndim(newsize))
        eta = scale * distrvs(size=newsize)
        return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
           'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
           'lpol2index', 'index2lpol']

if __name__ == '__main__':
    # Demo / smoke-test script: simulate ARMA processes with known lag
    # polynomials, fit them with ARIMAProcess, and compare the estimates
    # against the true coefficients and Yule-Walker estimates.

    # Simulate AR(1)
    #--------------
    # ar * y = ma * eta
    ar = [1, -0.8]
    ma = [1.0]
    # generate AR data
    eta = 0.1 * np.random.randn(1000)
    yar1 = signal.lfilter(ar, ma, eta)
    print("\nExample 0")
    arest = ARIMAProcess(yar1)
    rhohat, cov_x, infodict, mesg, ier = arest.fit((1, 0, 1))
    print(rhohat)
    print(cov_x)

    print("\nExample 1")
    ar = [1.0, -0.8]
    ma = [1.0, 0.5]
    y1 = arest.generate_sample(ar, ma, 1000, 0.1)
    arest = ARIMAProcess(y1)
    rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1, 0, 1))
    print(rhohat1)
    print(cov_x1)
    err1 = arest.errfn(x=y1)
    print(np.var(err1))
    import statsmodels.api as sm
    print(sm.regression.yule_walker(y1, order=2, inv=True))

    print("\nExample 2")
    nsample = 1000
    ar = [1.0, -0.6, -0.1]
    ma = [1.0, 0.3, 0.2]
    y2 = ARIMA.generate_sample(ar, ma, nsample, 0.1)
    arest2 = ARIMAProcess(y2)
    rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1, 0, 2))
    print(rhohat2)
    print(cov_x2)
    # BUG FIX: residuals must come from arest2 (fitted on y2); previously
    # this reused ``arest``, which was fitted on y1.
    err2 = arest2.errfn(x=y2)
    print(np.var(err2))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2, 0, 2))
    print(rhohat2a)
    print(cov_x2a)
    # BUG FIX: same arest -> arest2 correction as above.
    err2a = arest2.errfn(x=y2)
    print(np.var(err2a))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y2, order=2, inv=True))

    print("\nExample 20")
    nsample = 1000
    ar = [1.0]  #, -0.8, -0.4]
    ma = [1.0, 0.5, 0.2]
    y3 = ARIMA.generate_sample(ar, ma, nsample, 0.01)
    arest20 = ARIMAProcess(y3)
    rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2, 0, 0))
    print(rhohat3)
    print(cov_x3)
    err3 = arest20.errfn(x=y3)
    print(np.var(err3))
    print(np.sqrt(np.dot(err3, err3) / nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0, 0, 2))
    print(rhohat3a)
    print(cov_x3a)
    err3a = arest20.errfn(x=y3)
    print(np.var(err3a))
    print(np.sqrt(np.dot(err3a, err3a) / nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y3, order=2, inv=True))

    print("\nExample 02")
    nsample = 1000
    ar = [1.0, -0.8, 0.4]  #-0.8, -0.4]
    ma = [1.0]  #, 0.8, 0.4]
    y4 = ARIMA.generate_sample(ar, ma, nsample)
    arest02 = ARIMAProcess(y4)
    rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2, 0, 0))
    print(rhohat4)
    print(cov_x4)
    err4 = arest02.errfn(x=y4)
    print(np.var(err4))
    sige = np.sqrt(np.dot(err4, err4) / nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4)))
    print(np.sqrt(np.diag(cov_x4)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0, 0, 2))
    print(rhohat4a)
    print(cov_x4a)
    err4a = arest02.errfn(x=y4)
    print(np.var(err4a))
    sige = np.sqrt(np.dot(err4a, err4a) / nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4a)))
    print(np.sqrt(np.diag(cov_x4a)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    # ``sm`` was already imported above; the duplicate import was removed.
    print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))

    import matplotlib.pyplot as plt
    plt.plot(arest2.forecast()[-100:])
    #plt.show()

    ar1, ar2 = ([1, -0.4], [1, 0.5])
    ar2 = [1, -1]
    lagpolyproduct = np.convolve(ar1, ar2)
    print(deconvolve(lagpolyproduct, ar2, n=None))
    print(signal.deconvolve(lagpolyproduct, ar2))
    print(deconvolve(lagpolyproduct, ar2, n=10))
| bsd-3-clause |
plissonf/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
# Fractions of the data held out for testing; training size is 1 - heldout.
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
# Number of random train/test splits averaged per held-out fraction.
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
    ("SGD", SGDClassifier()),
    ("ASGD", SGDClassifier(average=True)),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
    ("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
    print("training %s" % name)
    # Re-seed per classifier so every model sees the same sequence of splits.
    rng = np.random.RandomState(42)
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            X_train, X_test, y_train, y_test = \
                train_test_split(X, y, test_size=i, random_state=rng)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            # error rate = 1 - accuracy on the held-out split
            yy_.append(1 - np.mean(y_pred == y_test))
        yy.append(np.mean(yy_))
    plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
echohenry2006/tvb-library | contrib/from_articles/region_deterministic_bnm_sj2d.py | 5 | 6136 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figures XX Sanz-Leon P., PhD Thesis
Needs:
A working installation of tvb
Run:
python region_deterministic_bnm_sjd2d.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_sj2d.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 20, 15 # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
pylab.rcParams.update({'lines.linewidth': 3})
pylab.rcParams.update({'axes.linewidth': 3})
parser = argparse.ArgumentParser(description='Reproduce results of Figure XX presented in Sanz-Leon 2014 PhD Thesis')
# NOTE(review): argparse stores these flags as strings, so ``-s False`` is
# still truthy; only omitting the flag keeps the default ``False``. Confirm
# this is the intended CLI behaviour.
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
# One simulation per global coupling strength in ``gcs``; ``idx`` tags the
# output file names.
idx = ['a0', 'a1', 'a2']
gcs = [0.0, 0.5, 1.0]
simulation_length = 2e3
speed = 10.
if args['sim']:
    for i in range(3):
        oscilator = models.ReducedSetFitzHughNagumo()
        oscilator.variables_of_interest = ["xi", "eta", "alpha","beta"]
        white_matter = connectivity.Connectivity.from_file("connectivity_66.zip")
        white_matter.speed = numpy.array([speed])
        white_matter_coupling = coupling.Linear(a=gcs[i])
        #Initialise an Integrator
        heunint = integrators.HeunDeterministic(dt=0.1)
        #Initialise some Monitors with period in physical time
        momo = monitors.Raw()
        mama = monitors.TemporalAverage(period=1.)
        #Bundle them
        what_to_watch = (momo, mama)
        #Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
        sim = simulator.Simulator(model = oscilator, connectivity = white_matter,
                                  coupling = white_matter_coupling,
                                  integrator = heunint, monitors = what_to_watch)
        sim.configure()
        # LOG.info("Starting simulation...")
        # #Perform the simulation
        raw_data = []
        raw_time = []
        tavg_data = []
        tavg_time = []
        # Stream the simulation, collecting raw and temporally-averaged output.
        for raw, tavg in sim(simulation_length=simulation_length):
            if not raw is None:
                raw_time.append(raw[0])
                raw_data.append(raw[1])
            if not tavg is None:
                tavg_time.append(tavg[0])
                tavg_data.append(tavg[1])
        LOG.info("Finished simulation.")
        #Make the lists numpy.arrays for easier use.
        TAVG = numpy.asarray(tavg_data)
        RAW = numpy.asarray(raw_data)
        LOG.info("Saving simulated data ...")
        numpy.save('region_deterministic_bnm_sj2d_raw_' + idx[i] + '.npy', RAW)
        numpy.save('region_deterministic_bnm_sj2d_tavg_' + idx[i] + '.npy', TAVG)
        numpy.save('region_deterministic_bnm_sj2d_rawtime_' + idx[i] + '.npy', raw_time)
        numpy.save('region_deterministic_bnm_sj2d_tavgtime_' + idx[i] + '.npy', tavg_time)
if args['fig']:
    for i in range(3):
        # Plot the second quarter of the simulated time series.
        # NOTE(review): with simulation_length = 2e3 these are floats
        # (500.0, 750.0); on Python 3 / modern numpy float slice indices
        # raise — confirm an int() cast is not needed here.
        start_point = simulation_length // 4
        end_point = simulation_length // 4 + start_point // 2
        LOG.info("Generating pretty pictures ...")
        TAVG = numpy.load('region_deterministic_bnm_sj2d_tavg_' + idx[i] + '.npy')
        tavg_time = numpy.load('region_deterministic_bnm_sj2d_tavgtime_' + idx[i] + '.npy')[start_point:end_point]
        fig= figure(1)
        clf()
        for k in range(3):
            # load data
            # compute time and use sim_length
            # Middle row: time series of the first mode pair (xi, eta).
            ax=subplot(3, 3, 4+k)
            plot(tavg_time, TAVG[start_point:end_point, 0, :, k],'k', alpha=0.042, linewidth=3)
            plot(tavg_time, TAVG[start_point:end_point, 1, :, k],'r', alpha=0.042, linewidth=3)
            plot(tavg_time, TAVG[start_point:end_point, 0, :, k].mean(axis=1), 'k')
            plot(tavg_time, TAVG[start_point:end_point, 1, :, k].mean(axis=1), 'r')
            ylim([-5, 2])
            xlim([start_point, int(end_point)])
            for label in ax.get_yticklabels():
                label.set_fontsize(20)
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            if k==0:
                ylabel('[au]')
                yticks((-4, 0, 1), ('-4', '0', '1'))
            title(r'TS ($m=1$)')
            # Bottom row: time series of the second mode pair.
            ax=subplot(3, 3, 7+k)
            plot(tavg_time, TAVG[start_point:end_point, 2, :, k],'k', alpha=0.042, linewidth=3)
            plot(tavg_time, TAVG[start_point:end_point, 3, :, k],'r', alpha=0.042, linewidth=3)
            plot(tavg_time, TAVG[start_point:end_point, 2, :, k].mean(axis=1), 'k')
            plot(tavg_time, TAVG[start_point:end_point, 3, :, k].mean(axis=1), 'r')
            ylim([-5, 2])
            xlim([start_point, int(end_point)])
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            xticks((start_point, end_point), (str(int(start_point)), str(int(end_point))))
            xlabel('time[ms]')
            if k==0:
                ylabel('[au]')
                yticks((-4, 0, 1), ('-4', '0', '1'))
            title(r'TS ($m=2$)')
            # Top row: phase-plane projection (state variable 0 vs 1).
            ax=subplot(3, 3, 1+k)
            plot(TAVG[start_point:end_point, 0, :, k], TAVG[start_point:end_point, 1, :, k],'b', alpha=0.042)
            plot(TAVG[start_point:end_point, 0, :, k].mean(axis=1), TAVG[start_point:end_point, 1, :, k].mean(axis=1), 'b')
            title(r'PP ($o=%s$)' % str(k))
            ax.yaxis.set_label_position("right")
            ylim([-5, 2])
            xlim([-5, 2])
            ax.get_xaxis().set_ticks([])
            ax.get_yaxis().set_ticks([])
            if k==1:
                xticks((-4, 0, 1), ('-4', '0', '1'))
                ax.xaxis.labelpad = -10
                xlabel(r'$\xi$')
                yticks((-4, 0, 1), ('-4', '0', '1'))
                ylabel(r'$\eta$')
        fig_name = 'SJ2D_default_speed_' + str(int(speed)) + '-config_gcs-' + idx[i] + '.png'
        savefig(fig_name)
###EoF### | gpl-2.0 |
DrLuke/FEMM-bode | femm.py | 1 | 6403 | import subprocess
#import matplotlib as mpl
#import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata, LinearNDInterpolator
import os
class FEMMans:
    """Parsed FEMM answer (.ans) file: scattered solution points (x, y)
    with a (possibly complex) field value B at each point."""

    def __init__(self, points, preamble):
        self.points = points
        self.preamble = preamble
        self.x = np.zeros(points)
        self.y = np.zeros(points)
        self.B = np.zeros(points, dtype=np.complex64)
        # Interpolators are built lazily by getValueAtPoint and cached here.
        self.interpRE = None
        self.interpIM = None

    @staticmethod
    def readans(path):
        """Parse the .ans file at ``path``; only [Format] 4.0 is supported.

        Returns a FEMMans instance, or None for unknown formats.
        """
        with open(path, "r") as f:
            firstline = f.readline()
            match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
            if match:
                if match.group(1) == "4.0":
                    return FEMMans.readans40(f)

    @staticmethod
    def readans40(f):
        """Parse a format-4.0 answer file from the open handle ``f``."""
        preamble = ""  # Everything before the [Solution] tag
        points = None  # Number of datapoints to expect
        ans = None
        index = 0
        dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s?([\d\.e-]*)\s?$")
        frequency = None
        aftersolution = False
        for line in f:
            if not aftersolution:
                preamble += line
                if line == ("[Solution]\n"):
                    aftersolution = True
                    # BUG FIX: the preamble spans many lines, so re.match
                    # (anchored at position 0) could only ever find a
                    # [Frequency] line if it were the very first preamble
                    # line; re.search + MULTILINE finds it anywhere.
                    match = re.search("\[Frequency\]\s*?=\s*?(\d+\.?\d*)$", preamble, re.MULTILINE)
                    if match:
                        frequency = float(match.group(1))
            elif points is None:  # First line after [Solution] gives the number of points in the solution
                points = int(line)
                ans = FEMMans(points, preamble)
            else:  # Read data point and add to dataset
                match = dataregex.search(line)
                if match:
                    ans.x[index] = float(match.group(1))
                    ans.y[index] = float(match.group(2))
                    # DC solutions store only a real field value; AC
                    # solutions store real and imaginary parts.
                    if frequency == 0:
                        ans.B[index] = float(match.group(3))
                    else:
                        ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
                    index += 1
        return ans

    def generateimdata(self, gridsize):
        """Resample |B| onto a regular grid with ``gridsize`` points per unit
        length; returns the grid transposed for image display."""
        # Create grid with gridsize points per unit
        # Syntax is: start:stop:steps
        grid_x, grid_y = np.mgrid[math.floor(self.x.min()):math.ceil(self.x.max()):(math.ceil(self.x.max())-math.floor(self.x.min()))*gridsize*1j,
                                  math.floor(self.y.min()):math.ceil(self.y.max()):(math.ceil(self.y.max())-math.floor(self.y.min()))*gridsize*1j]
        grid = griddata(np.vstack((self.x, self.y)).T, np.absolute(self.B), (grid_x, grid_y), method='cubic')
        return grid.T

    def getValueAtPoint(self, x, y):
        """Linearly interpolate the complex field value at (x, y).

        The triangulation behind LinearNDInterpolator is expensive, so the
        two interpolators are built once and cached in ``interpRE`` /
        ``interpIM`` (previously they were rebuilt on every call).
        """
        if self.interpRE is None or self.interpIM is None:
            pts = np.vstack((self.x, self.y)).T
            self.interpRE = LinearNDInterpolator(pts, self.B.real)
            self.interpIM = LinearNDInterpolator(pts, self.B.imag)
        return self.interpRE((x, y)) + self.interpIM((x, y)) * 1j
class FEMMfem:
    """In-memory representation of a FEMM problem (.FEM) file."""

    # Matches a "[Frequency] = <number>" line anywhere in the file text.
    freqregex = re.compile(r"\[Frequency\]\s*=\s*[\d\.e-]+$", re.MULTILINE)

    def __init__(self, filecontent="", path=""):
        """Load the .FEM text either from a string or from a file path."""
        if filecontent:
            self.femcontent = filecontent
        elif path:
            with open(path) as source:
                self.femcontent = source.read()
        else:
            self.femcontent = None

    def setfreq(self, freq):
        """Return a copy of the file text with the frequency line replaced
        by ``[Frequency] = <freq>``; the stored content is left untouched."""
        replacement = "[Frequency] = %s" % freq
        return FEMMfem.freqregex.sub(replacement, self.femcontent)
class FEMM:
    """Driver that runs FEMM (through wine) over a log-spaced frequency
    sweep and parses the resulting .ans solution files.

    NOTE(review): ``readans``/``readans40`` duplicate the static methods on
    ``FEMMans`` (this copy uses a stricter 5-column data regex and never
    parses [Frequency]); consider delegating to FEMMans instead.
    """
    def readans(self, path):
        # Dispatch on the [Format] version found in the file's first line.
        with open(path, "r") as f:
            firstline = f.readline()
            match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
            if match:
                if match.group(1) == "4.0":
                    return self.readans40(f)
    def readans40(self, f):
        preamble = ""  # Everything before the [Solution] tag
        points = None  # Number of datapoints to expect
        ans = None
        index = 0
        dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
        aftersolution = False
        for line in f:
            if not aftersolution:
                preamble += line
                if line == ("[Solution]\n"):
                    aftersolution = True
            elif points is None:  # First line after [Solution] gives the number of points in the solution
                points = int(line)
                ans = FEMMans(points, preamble)
            else:  # Read data point and add to dataset
                match = dataregex.search(line)
                if match:
                    ans.x[index] = float(match.group(1))
                    ans.y[index] = float(match.group(2))
                    # Always treats columns 3/4 as Re/Im parts (AC case).
                    ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
                    index += 1
        return ans
    """def saveans(self, ans, name):
        grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j,
                        math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
        grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
        plt.imshow(grid.T, extent=(
            math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())),
                   cmap=plt.get_cmap("jet"),
                   vmin=0, vmax=0.0000002)
        plt.colorbar()
        plt.contour(grid_x, grid_y, grid)
        plt.savefig(name)
        plt.clf()"""
    # NOTE(review): ``saveans`` above is disabled (commented out as a string
    # literal, together with the matplotlib imports at the top of the file),
    # but ``plotlogrange`` below still calls ``self.saveans``, which will
    # raise AttributeError at runtime — confirm whether it should be restored.
    def plotlogrange(self, femmfile, start, stop):
        # For each of 200 log-spaced frequencies: rewrite the .FEM file's
        # [Frequency] line, run FEMM under wine via a lua script, then parse
        # and plot the produced TEMP.ans solution.
        file = None
        with open(femmfile) as f:
            file = f.read()
        logscale = np.logspace(start, stop, num=200)
        freqregex = re.compile(r"\[Frequency\]\s*=\s*[\d\.e-]+$", re.MULTILINE)
        for freq in logscale:
            newfile = freqregex.sub("[Frequency] = %s" % freq, file)
            tail, head = os.path.split(femmfile)
            with open(os.path.join(tail, "TEMP.FEM"), "w") as f:
                f.write(newfile)
            #wine C:\\femm42\\bin\\femm.exe -lua-script=C:\\femm42\\examples\\test.lua
            subprocess.call(["wine", "C:\\femm42\\bin\\femm.exe", "-lua-script=C:\\femm42\\examples\\test.lua", "-windowhide"])
            thisans = self.readans(os.path.join(tail, "TEMP.ans"))
            self.saveans(thisans, os.path.join("test", str(freq) + ".png"))
| mit |
openfisca/openfisca-qt | openfisca_qt/scripts/alexis/chunk.py | 1 | 1878 | # -*- coding:utf-8 -*-
#
# This file is part of OpenFisca.
# OpenFisca is a socio-fiscal microsimulation software
# Copyright © 2013 Alexis Eidelman, Clément Schaff, Mahdi Ben Jelloul
# Licensed under the terms of the GVPLv3 or later license
# (see openfisca/__init__.py for details)
# Script to compute the aggregates for all the referenced years
import os
import pdb
from openfisca_core.simulations import SurveySimulation
from openfisca_france.data.sources.config import destination_dir
from openfisca_qt.plugins.survey.aggregates import Aggregates
from openfisca_qt.plugins.survey.inequality import Inequality
from pandas import ExcelWriter, ExcelFile, HDFStore
import pandas.rpy.common as com
fname_all = "aggregates_inflated_loyers.xlsx"
# Build the full output path from the base workbook name above.
fname_all = os.path.join(destination_dir, fname_all)
num_output = None
def test_chunk():
    """Benchmark SurveySimulation.compute with 1..4 data chunks and print
    the elapsed time per chunk count (Python 2 script: uses ``print``
    statements and ``time.clock``)."""
    print "debut"
    writer = None
    years = range(2011,2012)
    filename = destination_dir+'output3.h5'
    store = HDFStore(filename)
    for year in years:
        yr = str(year)
        # fname = "Agg_%s.%s" %(str(yr), "xls")
        simu = SurveySimulation()
        simu.set_config(year = yr)
        simu.set_param()
        import time
        tps = {}
        # Time one full compute() per chunk count.
        for nb_chunk in range(1,5):
            deb_chunk = time.clock()
            simu.set_config(survey_filename='C:\\Til\\output\\to_run_leg.h5', num_table=3, chunks_count=nb_chunk ,
                            print_missing=False)
            simu.compute()
            tps[nb_chunk] = time.clock() - deb_chunk
        # Inspect the last run's output interactively, then print aggregates.
        voir = simu.output_table.table3['foy']
        print len(voir)
        pdb.set_trace()
        agg3 = Aggregates()
        agg3.set_simulation(simu)
        agg3.compute()
        df1 = agg3.aggr_frame
        print df1.to_string()
        print tps
    store.close()
if __name__ == '__main__':
    test_chunk()
| agpl-3.0 |
shekkizh/TensorflowProjects | FaceDetection/FaceDetectionDataUtils.py | 1 | 4210 | __author__ = 'Charlie'
import pandas as pd
import numpy as np
import os, sys, inspect
from six.moves import cPickle as pickle
import scipy.misc as misc
# Kaggle facial-keypoints images are 96x96 greyscale, with 30 label columns.
IMAGE_SIZE = 96
NUM_LABELS = 30
VALIDATION_PERCENT = 0.1 # use 10 percent of training images for validation
# Labels are normalised around the image centre (48 px).
IMAGE_LOCATION_NORM = IMAGE_SIZE / 2
# Fixed seed so the train/validation shuffle is reproducible.
np.random.seed(0)
def read_data(data_dir, force=False):
    """Load the facial-keypoints dataset, caching a pickled copy.

    Builds ``FaceDetectionData.pickle`` from training.csv/test.csv on the
    first call (or when ``force`` is True) and reloads from the pickle
    afterwards.  Pixels are scaled to [0, 1]; keypoint labels are normalised
    to roughly [-1, 1] around the image centre.  (Python 2 script.)
    """
    pickle_file = os.path.join(data_dir, "FaceDetectionData.pickle")
    if force or not os.path.exists(pickle_file):
        train_filename = os.path.join(data_dir, "training.csv")
        data_frame = pd.read_csv(train_filename)
        cols = data_frame.columns[:-1]
        # Persist label column names so predictions can be mapped back later.
        np.savetxt(os.path.join(data_dir, "column_labels.txt"), cols.values, fmt="%s")
        data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        print "Reading training.csv ..."
        # scale data to a 1x1 image with pixel values 0-1
        train_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        train_labels = (data_frame[cols].values - IMAGE_LOCATION_NORM) / float(IMAGE_LOCATION_NORM)
        # Shuffle once before carving off the validation split.
        permutations = np.random.permutation(train_images.shape[0])
        train_images = train_images[permutations]
        train_labels = train_labels[permutations]
        validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
        validation_images = train_images[:validation_percent]
        validation_labels = train_labels[:validation_percent]
        train_images = train_images[validation_percent:]
        train_labels = train_labels[validation_percent:]
        print "Reading test.csv ..."
        test_filename = os.path.join(data_dir, "test.csv")
        data_frame = pd.read_csv(test_filename)
        data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        test_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        with open(pickle_file, "wb") as file:
            try:
                print 'Picking ...'
                save = {
                    "train_images": train_images,
                    "train_labels": train_labels,
                    "validation_images": validation_images,
                    "validation_labels": validation_labels,
                    "test_images": test_images,
                }
                pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)
            except:
                # NOTE(review): bare except silently swallows every pickling
                # error (even KeyboardInterrupt) — narrow it if possible.
                print("Unable to pickle file :/")
    # The pickle is always re-read, even immediately after being written.
    with open(pickle_file, "rb") as file:
        save = pickle.load(file)
        train_images = save["train_images"]
        train_labels = save["train_labels"]
        validation_images = save["validation_images"]
        validation_labels = save["validation_labels"]
        test_images = save["test_images"]
    return train_images, train_labels, validation_images, validation_labels, test_images
def save_sample_result(X, y, save_dir):
    """Burn keypoints into the images as small dark dots and save each
    image under ``save_dir/checkpoints/<i>.jpg``.

    X : (n, 96, 96, 1) float array of images in [0, 1]; modified in place.
    y : (n, 30) array of pixel coordinates — from the indexing below the
        columns alternate (column, row) per keypoint; confirm against the
        Kaggle x/y column layout.
    """
    for i in range(X.shape[0]):
        fn = os.path.join(save_dir, "checkpoints", "%d.jpg" % i)
        for j in range(0, y.shape[1], 2):
            # BUG FIX: the coordinates are floats, but numpy requires
            # integer slice bounds; truncation matches the legacy
            # float-index behaviour of old numpy.
            pt1 = int(y[i, j + 1])
            pt2 = int(y[i, j])
            X[i, pt1 - 1:pt1 + 1, pt2 - 1:pt2 + 1] = 0
        misc.imsave(fn, X[i, :, :, 0])
def kaggle_submission_format(test_images, test_labels, data_dir):
    """Denormalise predicted keypoints, save a few annotated sample images,
    and write a Kaggle ``submission.csv`` mapping RowId -> Location.
    """
    # Undo the [-1, 1] normalisation back to pixel coordinates in [0, 96].
    test_labels *= IMAGE_LOCATION_NORM
    test_labels += IMAGE_LOCATION_NORM
    test_labels = test_labels.clip(0, 96)
    # BUG FIX: this call was duplicated on two consecutive lines; one call
    # produces the identical output (the pixel mutation is idempotent).
    save_sample_result(test_images[0:16], test_labels[0:16], data_dir)
    lookup_filename = os.path.join(data_dir, "IdLookupTable.csv")
    lookup_table = pd.read_csv(lookup_filename)
    values = []
    # column_labels.txt (written by read_data) maps feature names to the
    # label-column order used in test_labels.
    cols = np.genfromtxt(os.path.join(data_dir, "column_labels.txt"), dtype=str)
    for index, row in lookup_table.iterrows():
        values.append((
            row['RowId'],
            test_labels[row.ImageId - 1][np.where(cols == row.FeatureName)[0][0]],
        ))
    submission = pd.DataFrame(values, columns=('RowId', 'Location'))
    submission.to_csv(os.path.join(data_dir, 'submission.csv'), index=False)
    print("Submission created!")
| mit |
X-DataInitiative/tick | tick/linear_model/tests/logistic_regression_test.py | 2 | 24388 | # License: BSD 3 clause
import itertools
import unittest
import numpy as np
from sklearn.metrics.ranking import roc_auc_score
from tick.base.inference import InferenceTest
from tick.linear_model import SimuLogReg, LogisticRegression
from tick.simulation import weights_sparse_gauss
from tick.preprocessing.features_binarizer import FeaturesBinarizer
from tick.prox import ProxZero, ProxL1, ProxL2Sq, ProxElasticNet, ProxTV, \
ProxBinarsity
# All solver / penalty names exercised by the parametrised tests below.
solvers = ['gd', 'agd', 'sgd', 'sdca', 'bfgs', 'svrg']
penalties = ['none', 'l2', 'l1', 'tv', 'elasticnet', 'binarsity']
class Test(InferenceTest):
    def setUp(self):
        # Arbitrary float/int fixtures reused by the setter tests below.
        self.float_1 = 5.23e-4
        self.float_2 = 3.86e-2
        self.int_1 = 3198
        self.int_2 = 230
        # Tiny degenerate dataset (5 samples, 5 features, one positive
        # label): enough to call ``fit`` without doing real learning.
        self.X = np.zeros((5, 5))
        self.y = np.zeros(5)
        self.y[0] = 1
    @staticmethod
    def get_train_data(n_features=20, n_samples=3000, nnz=5):
        """Simulate a logistic-regression dataset with a sparse Gaussian
        weight vector (``nnz`` non-zeros) and intercept 0.1; returns the
        ``(features, labels)`` pair. Seeded for reproducibility."""
        np.random.seed(12)
        weights0 = weights_sparse_gauss(n_features, nnz=nnz)
        interc0 = 0.1
        features, y = SimuLogReg(weights0, interc0, n_samples=n_samples,
                                 verbose=False).simulate()
        return features, y
    def test_LogisticRegression_fit(self):
        """...Test LogisticRegression fit with different solvers and penalties
        """
        # Common seed for the stochastic solvers (sgd/svrg/sdca).
        sto_seed = 179312
        raw_features, y = Test.get_train_data()
        for fit_intercept in [True, False]:
            for penalty in penalties:
                if penalty == 'binarsity':
                    # binarize features
                    n_cuts = 3
                    binarizer = FeaturesBinarizer(n_cuts=n_cuts)
                    features = binarizer.fit_transform(raw_features)
                else:
                    features = raw_features
                for solver in solvers:
                    solver_kwargs = {
                        'penalty': penalty,
                        'tol': 1e-5,
                        'solver': solver,
                        'verbose': False,
                        'max_iter': 10,
                        'fit_intercept': fit_intercept
                    }
                    if penalty != 'none':
                        solver_kwargs['C'] = 100
                    if penalty == 'binarsity':
                        solver_kwargs['blocks_start'] = binarizer.blocks_start
                        solver_kwargs[
                            'blocks_length'] = binarizer.blocks_length
                    if solver == 'sdca':
                        solver_kwargs['sdca_ridge_strength'] = 2e-2
                    if solver in ['sgd', 'svrg', 'sdca']:
                        solver_kwargs['random_state'] = sto_seed
                    if solver == 'sgd':
                        solver_kwargs['step'] = 1.
                    if solver == 'bfgs':
                        # BFGS only accepts ProxZero and ProxL2sq for now
                        if penalty not in ['none', 'l2']:
                            continue
                    learner = LogisticRegression(**solver_kwargs)
                    learner.fit(features, y)
                    probas = learner.predict_proba(features)[:, 1]
                    auc = roc_auc_score(y, probas)
                    # Loose sanity bound: every converging solver/penalty
                    # combination should comfortably beat AUC 0.7.
                    self.assertGreater(
                        auc, 0.7, "solver %s with penalty %s and "
                        "intercept %s reached too low AUC" % (solver, penalty,
                                                              fit_intercept))
    def test_LogisticRegression_warm_start(self):
        """...Test LogisticRegression warm start
        """
        sto_seed = 179312
        X, y = Test.get_train_data()
        fit_intercepts = [True, False]
        cases = itertools.product(solvers, fit_intercepts)
        for solver, fit_intercept in cases:
            solver_kwargs = {
                'solver': solver,
                'max_iter': 2,
                'fit_intercept': fit_intercept,
                'warm_start': True,
                'tol': 0
            }
            if solver == 'sdca':
                # SDCA rejects warm_start at construction time.
                msg = '^SDCA cannot be warm started$'
                with self.assertRaisesRegex(ValueError, msg):
                    LogisticRegression(**solver_kwargs)
            else:
                if solver in ['sgd', 'svrg']:
                    solver_kwargs['random_state'] = sto_seed
                if solver == 'sgd':
                    solver_kwargs['step'] = .3
                learner = LogisticRegression(**solver_kwargs)
                learner.fit(X, y)
                if fit_intercept:
                    coeffs_1 = np.hstack((learner.weights, learner.intercept))
                else:
                    coeffs_1 = learner.weights
                learner.fit(X, y)
                if fit_intercept:
                    coeffs_2 = np.hstack((learner.weights, learner.intercept))
                else:
                    coeffs_2 = learner.weights
                # Thanks to warm start objective should have decreased
                self.assertLess(
                    learner._solver_obj.objective(coeffs_2),
                    learner._solver_obj.objective(coeffs_1))
@staticmethod
def specific_solver_kwargs(solver):
"""...A simple method to as systematically some kwargs to our tests
"""
return dict()
    def test_LogisticRegression_settings(self):
        """...Test LogisticRegression basic settings
        """
        # solver
        from tick.solver import AGD, GD, BFGS, SGD, SVRG, SDCA
        solver_class_map = {
            'gd': GD,
            'agd': AGD,
            'sgd': SGD,
            'svrg': SVRG,
            'bfgs': BFGS,
            'sdca': SDCA
        }
        # Each solver name must instantiate the matching tick solver class.
        for solver in solvers:
            learner = LogisticRegression(solver=solver,
                                         **Test.specific_solver_kwargs(solver))
            solver_class = solver_class_map[solver]
            self.assertTrue(isinstance(learner._solver_obj, solver_class))
        msg = '^``solver`` must be one of agd, bfgs, gd, sdca, sgd, ' \
              'svrg, got wrong_name$'
        with self.assertRaisesRegex(ValueError, msg):
            LogisticRegression(solver='wrong_name')
        # prox
        prox_class_map = {
            'none': ProxZero,
            'l1': ProxL1,
            'l2': ProxL2Sq,
            'elasticnet': ProxElasticNet,
            'tv': ProxTV,
            'binarsity': ProxBinarsity
        }
        # Each penalty name must instantiate the matching tick prox class.
        for penalty in penalties:
            if penalty == 'binarsity':
                learner = LogisticRegression(penalty=penalty, blocks_start=[0],
                                             blocks_length=[1])
            else:
                learner = LogisticRegression(penalty=penalty)
            prox_class = prox_class_map[penalty]
            self.assertTrue(isinstance(learner._prox_obj, prox_class))
        msg = '^``penalty`` must be one of binarsity, elasticnet, l1, l2, none, ' \
              'tv, got wrong_name$'
        with self.assertRaisesRegex(ValueError, msg):
            LogisticRegression(penalty='wrong_name')
    def test_LogisticRegression_model_settings(self):
        """...Test LogisticRegression setting of parameters of model
        """
        # fit_intercept must propagate to the underlying model object, both
        # at construction time and when mutated afterwards.
        for solver in solvers:
            learner = LogisticRegression(fit_intercept=True, solver=solver)
            self.assertEqual(learner.fit_intercept, True)
            self.assertEqual(learner._model_obj.fit_intercept, True)
            learner.fit_intercept = False
            self.assertEqual(learner.fit_intercept, False)
            self.assertEqual(learner._model_obj.fit_intercept, False)
            learner = LogisticRegression(fit_intercept=False, solver=solver)
            self.assertEqual(learner.fit_intercept, False)
            self.assertEqual(learner._model_obj.fit_intercept, False)
            learner.fit_intercept = True
            self.assertEqual(learner.fit_intercept, True)
            self.assertEqual(learner._model_obj.fit_intercept, True)
    def test_LogisticRegression_penalty_C(self):
        """...Test LogisticRegression setting of parameter of C
        """
        # For penalised learners the prox strength must be 1/C and C must be
        # positive; for penalty 'none', setting C only triggers a warning.
        for penalty in penalties:
            if penalty != 'none':
                if penalty == 'binarsity':
                    learner = LogisticRegression(
                        penalty=penalty, C=self.float_1, blocks_start=[0],
                        blocks_length=[1])
                else:
                    learner = LogisticRegression(penalty=penalty,
                                                 C=self.float_1)
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
                learner.C = self.float_2
                self.assertEqual(learner.C, self.float_2)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_2)
                msg = '^``C`` must be positive, got -1$'
                with self.assertRaisesRegex(ValueError, msg):
                    if penalty == 'binarsity':
                        LogisticRegression(penalty=penalty, C=-1,
                                           blocks_start=[0], blocks_length=[1])
                    else:
                        LogisticRegression(penalty=penalty, C=-1)
            else:
                msg = '^You cannot set C for penalty "%s"$' % penalty
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    if penalty == 'binarsity':
                        LogisticRegression(penalty=penalty, C=self.float_1,
                                           blocks_start=[0], blocks_length=[1])
                    else:
                        LogisticRegression(penalty=penalty, C=self.float_1)
                if penalty == 'binarsity':
                    learner = LogisticRegression(
                        penalty=penalty, blocks_start=[0], blocks_length=[1])
                else:
                    learner = LogisticRegression(penalty=penalty)
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.C = self.float_1
                msg = '^``C`` must be positive, got -2$'
                with self.assertRaisesRegex(ValueError, msg):
                    learner.C = -2
    def test_LogisticRegression_penalty_elastic_net_ratio(self):
        """...Test LogisticRegression setting of parameter of elastic_net_ratio
        """
        # Only the 'elasticnet' penalty exposes the ratio; every other
        # penalty must warn when it is set.
        ratio_1 = 0.6
        ratio_2 = 0.3
        for penalty in penalties:
            if penalty == 'elasticnet':
                learner = LogisticRegression(penalty=penalty, C=self.float_1,
                                             elastic_net_ratio=ratio_1)
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner.elastic_net_ratio, ratio_1)
                self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
                self.assertEqual(learner._prox_obj.ratio, ratio_1)
                learner.elastic_net_ratio = ratio_2
                self.assertEqual(learner.C, self.float_1)
                self.assertEqual(learner.elastic_net_ratio, ratio_2)
                self.assertEqual(learner._prox_obj.ratio, ratio_2)
            else:
                msg = '^Penalty "%s" has no elastic_net_ratio attribute$$' % \
                      penalty
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    if penalty == 'binarsity':
                        LogisticRegression(penalty=penalty,
                                           elastic_net_ratio=0.8,
                                           blocks_start=[0], blocks_length=[1])
                    else:
                        LogisticRegression(penalty=penalty,
                                           elastic_net_ratio=0.8)
                if penalty == 'binarsity':
                    learner = LogisticRegression(
                        penalty=penalty, blocks_start=[0], blocks_length=[1])
                else:
                    learner = LogisticRegression(penalty=penalty)
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.elastic_net_ratio = ratio_1
    def test_LogisticRegression_solver_basic_settings(self):
        """...Test LogisticRegression setting of basic parameters of solver
        """
        # tol / max_iter / verbose / print_every / record_every must all be
        # forwarded to the underlying solver object and stay mutable.
        for solver in solvers:
            # tol
            learner = LogisticRegression(solver=solver, tol=self.float_1,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.tol, self.float_1)
            self.assertEqual(learner._solver_obj.tol, self.float_1)
            learner.tol = self.float_2
            self.assertEqual(learner.tol, self.float_2)
            self.assertEqual(learner._solver_obj.tol, self.float_2)
            # max_iter
            learner = LogisticRegression(solver=solver, max_iter=self.int_1,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.max_iter, self.int_1)
            self.assertEqual(learner._solver_obj.max_iter, self.int_1)
            learner.max_iter = self.int_2
            self.assertEqual(learner.max_iter, self.int_2)
            self.assertEqual(learner._solver_obj.max_iter, self.int_2)
            # verbose
            learner = LogisticRegression(solver=solver, verbose=True,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.verbose, True)
            self.assertEqual(learner._solver_obj.verbose, True)
            learner.verbose = False
            self.assertEqual(learner.verbose, False)
            self.assertEqual(learner._solver_obj.verbose, False)
            learner = LogisticRegression(solver=solver, verbose=False,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.verbose, False)
            self.assertEqual(learner._solver_obj.verbose, False)
            learner.verbose = True
            self.assertEqual(learner.verbose, True)
            self.assertEqual(learner._solver_obj.verbose, True)
            # print_every
            learner = LogisticRegression(solver=solver, print_every=self.int_1,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.print_every, self.int_1)
            self.assertEqual(learner._solver_obj.print_every, self.int_1)
            learner.print_every = self.int_2
            self.assertEqual(learner.print_every, self.int_2)
            self.assertEqual(learner._solver_obj.print_every, self.int_2)
            # record_every
            learner = LogisticRegression(solver=solver,
                                         record_every=self.int_1,
                                         **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.record_every, self.int_1)
            self.assertEqual(learner._solver_obj.record_every, self.int_1)
            learner.record_every = self.int_2
            self.assertEqual(learner.record_every, self.int_2)
            self.assertEqual(learner._solver_obj.record_every, self.int_2)
    def test_LogisticRegression_solver_step(self):
        """...Test LogisticRegression setting of step parameter of solver
        """
        # Solvers without a step (sdca, bfgs) warn and keep it at None;
        # the others forward it to the solver object. SGD additionally
        # warns at fit time when no step was tuned.
        for solver in solvers:
            if solver in ['sdca', 'bfgs']:
                msg = '^Solver "%s" has no settable step$' % solver
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = LogisticRegression(
                        solver=solver, step=1,
                        **Test.specific_solver_kwargs(solver))
                    self.assertIsNone(learner.step)
            else:
                learner = LogisticRegression(
                    solver=solver, step=self.float_1,
                    **Test.specific_solver_kwargs(solver))
                self.assertEqual(learner.step, self.float_1)
                self.assertEqual(learner._solver_obj.step, self.float_1)
                learner.step = self.float_2
                self.assertEqual(learner.step, self.float_2)
                self.assertEqual(learner._solver_obj.step, self.float_2)
            if solver in ['sgd']:
                msg = '^SGD step needs to be tuned manually$'
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = LogisticRegression(solver='sgd')
                    learner.fit(self.X, self.y)
    def test_LogisticRegression_solver_random_state(self):
        """...Test LogisticRegression setting of random_state parameter of solver
        """
        # Deterministic solvers warn and keep random_state at None; the
        # stochastic ones forward it as the solver seed. In every case the
        # attribute is read-only after construction.
        for solver in solvers:
            if solver in ['bfgs', 'agd', 'gd']:
                msg = '^Solver "%s" has no settable random_state$' % solver
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = LogisticRegression(
                        solver=solver, random_state=1,
                        **Test.specific_solver_kwargs(solver))
                    self.assertIsNone(learner.random_state)
            else:
                learner = LogisticRegression(
                    solver=solver, random_state=self.int_1,
                    **Test.specific_solver_kwargs(solver))
                self.assertEqual(learner.random_state, self.int_1)
                self.assertEqual(learner._solver_obj.seed, self.int_1)
                msg = '^random_state must be positive, got -1$'
                with self.assertRaisesRegex(ValueError, msg):
                    LogisticRegression(solver=solver, random_state=-1,
                                       **Test.specific_solver_kwargs(solver))
            msg = '^random_state is readonly in LogisticRegression$'
            with self.assertRaisesRegex(AttributeError, msg):
                learner = LogisticRegression(
                    solver=solver, **Test.specific_solver_kwargs(solver))
                learner.random_state = self.int_2
    def test_LogisticRegression_solver_sdca_ridge_strength(self):
        """...Test LogisticRegression setting of sdca_ridge_strength parameter
        of solver

        Only the 'sdca' solver has a ridge strength (its l_l2sq); every
        other solver must warn both at construction and on assignment.
        """
        for solver in solvers:
            if solver == 'sdca':
                # forwarded to the C++ solver as l_l2sq, at init and on set
                learner = LogisticRegression(
                    solver=solver, sdca_ridge_strength=self.float_1,
                    **Test.specific_solver_kwargs(solver))
                self.assertEqual(learner.sdca_ridge_strength, self.float_1)
                self.assertEqual(learner._solver_obj._solver.get_l_l2sq(),
                                 self.float_1)

                learner.sdca_ridge_strength = self.float_2
                self.assertEqual(learner.sdca_ridge_strength, self.float_2)
                self.assertEqual(learner._solver_obj._solver.get_l_l2sq(),
                                 self.float_2)
            else:
                # any other solver: both paths must raise a RuntimeWarning
                msg = '^Solver "%s" has no sdca_ridge_strength attribute$' % \
                      solver
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    LogisticRegression(solver=solver, sdca_ridge_strength=1e-2,
                                       **Test.specific_solver_kwargs(solver))

                learner = LogisticRegression(
                    solver=solver, **Test.specific_solver_kwargs(solver))
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner.sdca_ridge_strength = self.float_1
def test_safe_array_cast(self):
"""...Test error and warnings raised by LogLearner constructor
"""
msg = '^Copying array of size \(5, 5\) to convert it in the ' \
'right format$'
with self.assertWarnsRegex(RuntimeWarning, msg):
LogisticRegression._safe_array(self.X.astype(int))
msg = '^Copying array of size \(3, 5\) to create a ' \
'C-contiguous version of it$'
with self.assertWarnsRegex(RuntimeWarning, msg):
LogisticRegression._safe_array(self.X[::2])
np.testing.assert_array_equal(self.X,
LogisticRegression._safe_array(self.X))
    def test_labels_encoding(self):
        """...Test that class encoding is well done for LogReg

        Whatever the raw label alphabet ({-1, 1}, {0, 1} or strings), the
        internal encoding must map it onto {-1., 1.}.
        """
        learner = LogisticRegression(max_iter=1)

        np.random.seed(38027)
        n_features = 3
        n_samples = 5
        X = np.random.rand(n_samples, n_features)

        # already-encoded labels must pass through unchanged
        encoded_y = np.array([1., -1., 1., -1., -1.])
        learner.fit(X, encoded_y)
        np.testing.assert_array_equal(learner.classes, [-1., 1.])
        np.testing.assert_array_equal(
            learner._encode_labels_vector(encoded_y), encoded_y)

        # 0/1 labels are remapped to -1/1
        zero_one_y = np.array([1., 0., 1., 0., 0.])
        learner.fit(X, zero_one_y)
        np.testing.assert_array_equal(learner.classes, [0., 1.])
        np.testing.assert_array_equal(
            learner._encode_labels_vector(zero_one_y), encoded_y)

        # string labels: sign assignment is arbitrary, so compare up to a
        # global sign flip
        text_y = np.array(['cat', 'dog', 'cat', 'dog', 'dog'])
        learner.fit(X, text_y)
        np.testing.assert_array_equal(set(learner.classes), {'cat', 'dog'})
        encoded_text_y = learner._encode_labels_vector(text_y)
        np.testing.assert_array_equal(
            encoded_text_y,
            encoded_y * np.sign(encoded_text_y[0]) * np.sign(encoded_y[0]))
    def test_predict(self):
        """...Test LogReg prediction

        Predictions must come back in the caller's label alphabet for
        every supported encoding (floats, ints, 0/1, strings).
        """
        # each mapping translates the canonical {-1, 1} labels into a
        # different user-facing alphabet
        labels_mappings = [{
            -1: -1.,
            1: 1.
        }, {
            -1: 1.,
            1: -1.
        }, {
            -1: 1,
            1: 0
        }, {
            -1: 0,
            1: 1
        }, {
            -1: 'cat',
            1: 'dog'
        }]

        for labels_mapping in labels_mappings:
            X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
            y = np.vectorize(labels_mapping.get)(y)

            learner = LogisticRegression(random_state=32789, tol=1e-9)
            learner.fit(X, y)

            X_test, y_test = Test.get_train_data(n_features=12, n_samples=5,
                                                 nnz=0)
            # expected canonical predictions, translated into this alphabet
            predicted_y = [1., 1., -1., 1., 1.]
            predicted_y = np.vectorize(labels_mapping.get)(predicted_y)

            np.testing.assert_array_equal(learner.predict(X_test), predicted_y)
    def test_predict_proba(self):
        """...Test LogReg predict_proba

        Checks class-probability output against regression values
        (columns are P(class 0), P(class 1) and sum to 1).
        """
        X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
        learner = LogisticRegression(random_state=32289, tol=1e-13)
        learner.fit(X, y)

        X_test, y_test = Test.get_train_data(n_features=12, n_samples=5, nnz=0)
        # reference values pinned from a previous run (regression test)
        predicted_probas = np.array(
            [[0.35851418, 0.64148582], [0.42549328, 0.57450672],
             [0.6749705, 0.3250295], [0.39684181,
                                      0.60315819], [0.42732443, 0.57267557]])
        np.testing.assert_array_almost_equal(
            learner.predict_proba(X_test), predicted_probas, decimal=3)
    def test_decision_function(self):
        """...Test LogReg decision_function

        Checks the raw decision values (signed distance to the decision
        boundary) against regression values pinned from a previous run.
        """
        X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
        learner = LogisticRegression(random_state=32789, tol=1e-13)
        learner.fit(X, y)

        X_test, y_test = Test.get_train_data(n_features=12, n_samples=5, nnz=0)
        decision_function_values = np.array(
            [0.58182, 0.30026, -0.73075, 0.41864, 0.29278])
        np.testing.assert_array_almost_equal(
            learner.decision_function(X_test), decision_function_values,
            decimal=3)
    def test_float_double_arrays_fitting(self):
        """...Test that fitting works in both float32 and float64.

        The learned weights must keep the input dtype, and the float32
        solution must agree with the float64 one to ~5 decimals.
        """
        X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
        learner_64 = LogisticRegression(random_state=32789, tol=1e-13)
        learner_64.fit(X, y)
        weights_64 = learner_64.weights
        self.assertEqual(weights_64.dtype, np.dtype('float64'))

        learner_32 = LogisticRegression(random_state=32789, tol=1e-13)
        X_32, y_32 = X.astype('float32'), y.astype('float32')
        learner_32.fit(X_32, y_32)
        weights_32 = learner_32.weights
        self.assertEqual(weights_32.dtype, np.dtype('float32'))

        # same problem, same seed: solutions must match up to precision
        np.testing.assert_array_almost_equal(weights_32, weights_64, decimal=5)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_read_size.py | 9 | 1867 | """Test Unpacker's read_array_header and read_map_header methods"""
from pandas.msgpack import packb, Unpacker, OutOfData
UnexpectedTypeException = ValueError
def test_read_array_header():
    """read_array_header yields the element count; items are then unpacked
    one by one, and a further unpack on the drained buffer raises OutOfData."""
    u = Unpacker()
    u.feed(packb(['a', 'b', 'c']))
    assert u.read_array_header() == 3
    for expected in (b'a', b'b', b'c'):
        assert u.unpack() == expected
    # buffer is exhausted now: one more unpack must signal OutOfData
    try:
        u.unpack()
    except OutOfData:
        pass
    else:
        assert 0, 'should raise exception'
def test_read_map_header():
    """read_map_header yields the pair count; key and value are unpacked
    in order, and a further unpack on the drained buffer raises OutOfData."""
    u = Unpacker()
    u.feed(packb({'a': 'A'}))
    assert u.read_map_header() == 1
    for expected in (b'a', b'A'):
        assert u.unpack() == expected
    # buffer is exhausted now: one more unpack must signal OutOfData
    try:
        u.unpack()
    except OutOfData:
        pass
    else:
        assert 0, 'should raise exception'
def test_incorrect_type_array():
    """read_array_header on a non-array (an int) must raise ValueError."""
    unpacker = Unpacker()
    unpacker.feed(packb(1))
    try:
        unpacker.read_array_header()
        assert 0, 'should raise exception'
    except UnexpectedTypeException:
        assert 1, 'okay'
def test_incorrect_type_map():
    """read_map_header on a non-map (an int) must raise ValueError."""
    unpacker = Unpacker()
    unpacker.feed(packb(1))
    try:
        unpacker.read_map_header()
        assert 0, 'should raise exception'
    except UnexpectedTypeException:
        assert 1, 'okay'
def test_correct_type_nested_array():
    """Outer object is a map, so read_array_header must raise.

    NOTE(review): despite the 'correct_type' name, this asserts failure --
    the nested array is inside a map, and the header read sees the map.
    """
    unpacker = Unpacker()
    unpacker.feed(packb({'a': ['b', 'c', 'd']}))
    try:
        unpacker.read_array_header()
        assert 0, 'should raise exception'
    except UnexpectedTypeException:
        assert 1, 'okay'
def test_incorrect_type_nested_map():
    """Outer object is an array, so read_map_header must raise."""
    unpacker = Unpacker()
    unpacker.feed(packb([{'a': 'b'}]))
    try:
        unpacker.read_map_header()
        assert 0, 'should raise exception'
    except UnexpectedTypeException:
        assert 1, 'okay'
| gpl-2.0 |
hoenirvili/distributions | distributions/binomial.py | 1 | 2925 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom
from .distribution import Distribution
__all__ = ['Binomial']
class Binomial(Distribution):
    """
    A random variable X with a binomial distribution represents the
    number of successes in a sequence of n independent yes/no trials,
    each of which yields success with probability p.
    Parameters
    ----------
    r : int
        Number of successes among n trials
    n : int
        Number of trials
    p : int or float
        Probability of a trial to be successful
    """
    def __init__(self, r, n, p):
        # NOTE(review): the ``is None`` checks below are unreachable --
        # ``type(None) != int`` is already True for None -- but harmless.
        if type(r) != int or r < 0 or r is None:
            raise ValueError("Invalid number of sucesses among n trials")
        if type(n) != int or n < 0 or n is None:
            raise ValueError("Invalid number of trials")
        if (type(p) != int and type(p) != float or
                p > 1 or p < 0 or p is None):
            raise ValueError("Invalid probability number")
        self.__r = r
        self.__n = n
        self.__p = p
        self.__notp = 1 - p  # failure probability q = 1 - p
        # every success count from 0 to r inclusive, used by pmfs()/plot()
        self.__all_r = np.arange(0, self.__r + 1)
    def mean(self):
        """
        Compute the mean of the distribution
        Returns:
        --------
        mean : float
        """
        return binom.mean(self.__n, self.__p)
    def variance(self):
        """
        Compute the variance of the distribution
        Returns:
        --------
        variance : float
        """
        return binom.var(self.__n, self.__p)
    def pmf(self):
        """
        Compute the probability mass function at r successes
        Returns:
        --------
        pmf : float
        """
        return binom.pmf(self.__r, self.__n, self.__p)
    def std(self):
        """
        Compute the standard deviation of the distribution.
        Returns:
        --------
        std : float
        """
        return binom.std(self.__n, self.__p)
    def cdf(self):
        """
        Compute the cumulative distribution function at r successes.
        Returns:
        --------
        cdf : float
        """
        return binom.cdf(self.__r, self.__n, self.__p)
    def pmfs(self):
        """
        Compute the probability mass function of the distribution of all
        number of successes among n trials [0, r] interval
        Returns:
        --------
        pmf : numpy.narray
        """
        return binom.pmf(self.__all_r, self.__n, self.__p)
    def plot(self):
        """
        Plot all values pmf values ranging from zero to the
        number of successes among n trials
        """
        pmfs = self.pmfs()
        plt.plot(self.__all_r, pmfs, 'o-')
        plt.title('Binomial: number of trials=%i , probability=%.2f' %
                  (self.__n, self.__p), fontsize=15)
        plt.xlabel('Number of successes')
        plt.ylabel('Probability of successes', fontsize=15)
        plt.show()
| mit |
cosurgi/trunk | examples/test/psd.py | 10 | 1794 | # encoding: utf-8
#
# demonstrate how to generate sphere packing based on arbitrary PSD (particle size distribution)
# show the difference between size-based and mass-based (≡ volume-based in our case) PSD
#
import matplotlib; matplotlib.rc('axes',grid=True)
from yade import pack
import pylab
# PSD given as points of piecewise-linear function
psdSizes,psdCumm=[.02,0.04,0.045,.05,.06,.08,.12],[0.,0.1,0.3,0.3,.3,.7,1.]
pylab.plot(psdSizes,psdCumm,label='precribed mass PSD')
sp0=pack.SpherePack();
sp0.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True)
sp1=pack.SpherePack();
sp1.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True,num=5000)
sp2=pack.SpherePack();
sp2.makeCloud((0,0,0),(1,1,1),psdSizes=psdSizes,psdCumm=psdCumm,distributeMass=True,num=20000)
pylab.semilogx(*sp0.psd(bins=30,mass=True),label='Mass PSD of (free) %d random spheres'%len(sp0))
pylab.semilogx(*sp1.psd(bins=30,mass=True),label='Mass PSD of (imposed) %d random spheres'%len(sp1))
pylab.semilogx(*sp2.psd(bins=30,mass=True),label='Mass PSD of (imposed) %d random spheres (scaled down)'%len(sp2))
pylab.legend()
# uniform distribution of size (sp3) and of mass (sp4)
sp3=pack.SpherePack(); sp3.makeCloud((0,0,0),(1,1,1),rMean=0.03,rRelFuzz=2/3.,distributeMass=False);
sp4=pack.SpherePack(); sp4.makeCloud((0,0,0),(1,1,1),rMean=0.03,rRelFuzz=2/3.,distributeMass=True);
pylab.figure()
pylab.plot(*(sp3.psd(mass=True)+('g',)+sp4.psd(mass=True)+('r',)))
pylab.legend(['Mass PSD of size-uniform distribution','Mass PSD of mass-uniform distribution'])
pylab.figure()
pylab.plot(*(sp3.psd(mass=False)+('g',)+sp4.psd(mass=False)+('r',)))
pylab.legend(['Size PSD of size-uniform distribution','Size PSD of mass-uniform distribution'])
pylab.show()
pylab.show() | gpl-2.0 |
rowanc1/Seismogram | syntheticSeismogram.py | 2 | 14041 | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
def getPlotLog(d, log, dmax=200):
    """
    Duplicate each log sample so the curve plots as blocky (stair-step)
    layers versus depth. The depth axis drops the leading value and is
    closed at dmax, so interfaces line up with constant layer values.
    """
    depths = np.asarray(d, dtype=float)
    values = np.asarray(log, dtype=float)
    # repeat every entry twice: [a, b] -> [a, a, b, b]
    dplot = np.repeat(depths, 2)
    logplot = np.repeat(values, 2)
    # shift depths by one and terminate the deepest layer at dmax
    dplot = np.append(dplot[1:], dmax)
    return dplot, logplot
def getImpedance(rholog, vlog):
    """
    Acoustic Impedance is the product of density and velocity
    $$
    Z = \\rho v
    $$
    """
    density = np.asarray(rholog, dtype=float)
    velocity = np.asarray(vlog, dtype=float)
    return density * velocity
def getReflectivity(d, rho, v, usingT=True):
    """
    Compute the reflectivity series of a layered model.

    The reflection coefficient of an interface is
    $$
    R_i = \\frac{Z_{i+1} - Z_{i}}{Z_{i+1}+Z_{i}}
    $$
    The reflectivity can also include the effect of transmission through
    above layers, in which case the reflectivity is given by
    $$
    \\text{reflectivity} = R_i \\pi_{j = 1}^{i-1}(1-R_j^2)
    $$

    Parameters
    ----------
    d : array_like
        Layer-top depths (kept for interface uniformity; not used here).
    rho, v : array_like
        Density and velocity of each layer.
    usingT : bool
        Apply transmission losses to the reflectivity series.

    Returns
    -------
    rseries : ndarray
        Reflectivity (transmission-corrected when usingT is True).
    R : ndarray
        Raw interface reflection coefficients.
    """
    Z = getImpedance(rho, v)  # acoustic impedance of each layer
    dZ = (Z[1:] - Z[:-1])
    sZ = (Z[:-1] + Z[1:])
    R = dZ/sZ                 # raw reflection coefficients
    nlayer = len(v)           # number of layers

    # BUG FIX: previously ``rseries = R`` aliased the two arrays, so the
    # in-place transmission correction below (a) corrupted the returned R
    # and (b) fed already-scaled values back into the product via R[i].
    # Working on an independent copy applies the documented formula with
    # the *raw* coefficients.
    rseries = R.copy()
    if usingT:
        for i in range(nlayer - 1):
            rseries[i + 1:] = rseries[i + 1:] * (1. - R[i] ** 2)

    return rseries, R
def getTimeDepth(d, v, dmax=200):
    """
    The time depth conversion is computed by determining the two-way
    travel time for a reflection from a given depth.
    """
    # sorted layer tops, closed at the bottom of the model
    depths = np.append(np.sort(d), dmax)
    # two-way travel time spent inside each layer
    layer_twt = 2.0 * np.diff(depths) / v
    # cumulative two-way time from the surface to each layer top
    twttop = np.cumsum(np.append(0.0, layer_twt))
    return depths, twttop
def getLogs(d, rho, v, usingT=True):
    """
    Convenience wrapper: build every plotting-ready log at once.

    Returns the blocky depth axis, density log, velocity log, acoustic
    impedance log and the reflectivity series of the model.
    """
    dpth, rholog = getPlotLog(d, rho)
    vlog = getPlotLog(d, v)[1]
    zlog = getImpedance(rholog, vlog)
    rseries = getReflectivity(d, rho, v, usingT)[0]
    return dpth, rholog, vlog, zlog, rseries
def syntheticSeismogram(d, rho, v, wavf, wavA=1., usingT=True, wavtyp = 'RICKER', dt=0.0001, dmax=200):
    """
    function syntheticSeismogram(d, rho, v, wavtyp, wavf, usingT)

    syntheicSeismogram generates a synthetic seismogram for
    a simple 1-D layered model.

    Inputs:
        d      : depth to the top of each layer (m)
        rho    : density of each layer (kg/m^3)
        v      : velocity of each layer (m/s)
                    The last layer is assumed to be a half-space
        wavf   : wavelet frequency (1 value for Ricker, 2 for Klauder,
                 4 for Ormsby)
        wavA   : wavelet amplitude
        usingT : using Transmission coefficients?
        wavtyp : type of Wavelet
                    The wavelet options are:
                        Ricker: takes one frequency
                        Ormsby: takes 4 frequencies
                        Klauder: takes 2 frequencies
        dt     : sample interval of the synthetic trace (s)
        dmax   : depth of the bottom of the model (m)

    Returns (tseis, seis, twav, wav, tref, rseries).

    Lindsey Heagy
    lheagy@eos.ubc.ca
    Created:  November 30, 2013
    Modified: October 3, 2014
    """
    v, rho, d = np.array(v, dtype=float), np.array(rho, dtype=float), np.array(d, dtype=float)
    usingT = np.array(usingT, dtype=bool)

    _, t = getTimeDepth(d, v, dmax)

    # BUG FIX: usingT was previously accepted but never forwarded, so
    # transmission losses were always applied; pass the flag through.
    rseries, R = getReflectivity(d, rho, v, usingT)

    # time for reflectivity series
    tref = t[1:-1]

    # create time vector
    t = np.arange(t.min(), t.max(), dt)

    # make wavelet time axis (two periods of the lowest frequency each side)
    twav = np.arange(-2.0/np.min(wavf), 2.0/np.min(wavf), dt)

    # Get source wavelet
    wav = {'RICKER': getRicker, 'ORMSBY': getOrmsby, 'KLAUDER': getKlauder}[wavtyp](wavf, twav)
    wav = wavA*wav

    # spike the reflectivity onto the regular time grid (nearest sample)
    rseriesconv = np.zeros(len(t))
    for i in range(len(tref)):
        index = np.abs(t - tref[i]).argmin()
        rseriesconv[index] = rseries[i]

    # Do the convolution
    seis = np.convolve(wav, rseriesconv)
    tseis = np.min(twav) + dt*np.arange(len(seis))
    # keep only the causal part that overlaps the model time window
    index = np.logical_and(tseis >= 0, tseis <= np.max(t))
    tseis = tseis[index]
    seis = seis[index]

    return tseis, seis, twav, wav, tref, rseries
## WAVELET DEFINITIONS
pi = np.pi
def getRicker(f, t):
    """
    Retrieve a Ricker (Mexican hat) wavelet with center frequency f.

    See: http://www.subsurfwiki.org/wiki/Ricker_wavelet
    """
    arg = (pi * f * t) ** 2
    return (1.0 - 2.0 * arg) * np.exp(-arg)
# def getGauss(f,t):
# assert len(f) == 1, 'Gauss wavelet needs 1 frequency as input'
# f = f[0]
def getOrmsby(f,t):
    """
    Retrieves an Ormsby wavelet with low-cut frequency f[0], low-pass frequency f[1], high-pass frequency f[2] and high-cut frequency f[3]

    See: http://www.subsurfwiki.org/wiki/Ormsby_filter
    """
    assert len(f) == 4, 'Ormsby wavelet needs 4 frequencies as input'
    f = np.sort(f) #Ormsby wavelet frequencies must be in increasing order
    pif   = pi*f
    den1  = pif[3] - pif[2]
    den2  = pif[1] - pif[0]
    # BUG FIX: the f[2] and f[0] sinc terms previously dropped the time
    # dependence (np.sinc(pif[2]) instead of np.sinc(pif[2]*t)), turning
    # them into constants and distorting the wavelet. All four terms now
    # carry t, matching the f[3] and f[1] terms.
    term1 = (pif[3]*np.sinc(pif[3]*t))**2 - (pif[2]*np.sinc(pif[2]*t))**2
    term2 = (pif[1]*np.sinc(pif[1]*t))**2 - (pif[0]*np.sinc(pif[0]*t))**2

    wav   = term1/den1 - term2/den2;
    return wav
def getKlauder(f, t, T=5.0):
    """
    Retrieve a Klauder wavelet for the sweep band [f[0], f[1]] with
    sweep duration T.

    See: http://www.subsurfwiki.org/wiki/Ormsby_filter
    """
    assert len(f) == 2, 'Klauder wavelet needs 2 frequencies as input'

    sweep_rate = np.diff(f) / T          # k: rate of frequency change
    f0 = np.sum(f) / 2.0                 # mid-frequency of the bandwidth
    envelope = np.sin(pi * sweep_rate * t * (T - t)) / (pi * sweep_rate * t)
    carrier = np.exp(2 * pi * 1j * f0 * t)
    return np.real(envelope * carrier)
## Plotting Functions
def plotLogFormat(log, dpth,xlim, col='blue'):
    """
    Nice formatting for plotting logs as a function of depth.

    Plots ``log`` against ``dpth`` with depth increasing downwards
    (inverted y axis), fixed x limits and rotated tick labels, and
    returns the matplotlib line artists.
    """
    ax = plt.plot(log,dpth,linewidth=2,color=col)
    plt.xlim(xlim)
    plt.ylim((dpth.min(),dpth.max()))
    plt.grid()
    plt.gca().invert_yaxis()  # depth grows downward
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    return ax
def plotLogs(d, rho, v, usingT=True):
    """
    Plotting wrapper to plot density, velocity, acoustic impedance and
    reflectivity as a function of depth (four side-by-side panels).
    """
    d = np.sort(d)
    dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)
    nd   = len(dpth)

    # shared axis limits so repeated calls are visually comparable
    xlimrho = (1.95,5.05)
    xlimv   = (0.25,4.05)
    xlimz   = (xlimrho[0]*xlimv[0], xlimrho[1]*xlimv[1])

    # Plot Density
    plt.figure(1)

    plt.subplot(141)
    plotLogFormat(rholog*10**-3,dpth,xlimrho,'blue')
    plt.title('$\\rho$')
    plt.xlabel('Density \n $\\times 10^3$ (kg /m$^3$)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)

    # Plot Velocity
    plt.subplot(142)
    plotLogFormat(vlog*10**-3,dpth,xlimv,'red')
    plt.title('$v$')
    plt.xlabel('Velocity \n $\\times 10^3$ (m/s)',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)

    # Plot Acoustic Impedance
    plt.subplot(143)
    plotLogFormat(zlog*10.**-6.,dpth,xlimz,'green')
    plt.gca().set_title('$Z = \\rho v$')
    plt.gca().set_xlabel('Impedance \n $\\times 10^{6}$ (kg m$^{-2}$ s$^{-1}$)',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)

    # Plot Reflectivity: one horizontal spike per interface
    plt.subplot(144)
    plt.hlines(d[1:],np.zeros(nd-1),rseries,linewidth=2)
    plt.plot(np.zeros(nd),dpth,linewidth=2,color='black')
    plt.title('Reflectivity');
    plt.xlim((-1.,1.))
    plt.gca().set_xlabel('Reflectivity')
    plt.grid()
    plt.gca().invert_yaxis()
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],visible=False)

    plt.tight_layout()
    plt.show()
def plotTimeDepth(d,v):
    """
    Wrapper to plot the depth-to-two-way-time conversion curve based on
    the provided velocity model.
    """
    dpth,t = getTimeDepth(d,v)
    plt.figure()
    plt.plot(dpth,t,linewidth=2);
    plt.title('Depth-Time');
    plt.grid()
    plt.gca().set_xlabel('Depth (m)',fontsize=9)
    plt.gca().set_ylabel('Two Way Time (s)',fontsize=9)
    plt.tight_layout()
    plt.show()
def plotSeismogram(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):
    """
    Plotting function to plot the wavelet, reflectivity series and
    seismogram as functions of time provided the geologic model
    (depths, densities, and velocities).

    ``noise`` is the relative amplitude of band-limited random noise
    added to the trace (0 for a clean seismogram).
    """
    tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)

    # band-limit white noise with a triangular smoothing filter
    noise  = noise*np.max(np.abs(seis))*np.random.randn(seis.size)
    filt   = np.arange(1.,15.)
    filtr  = filt[::-1]
    filt   = np.append(filt,filtr[1:])*1./15.
    noise  = np.convolve(noise,filt)
    noise  = noise[0:seis.size]

    seis = seis + noise

    plt.figure()

    # panel 1: source wavelet
    plt.subplot(131)
    plt.plot(wav,twav,linewidth=1,color='black')
    plt.title('Wavelet')
    plt.xlim((-2.,2.))
    plt.grid()
    plt.gca().invert_yaxis()
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)

    # panel 2: reflectivity spikes in two-way time
    plt.subplot(132)
    plt.plot(np.zeros(tref.size),(tseis.max(),tseis.min()),linewidth=2,color='black')
    plt.hlines(tref,np.zeros(len(rseriesconv)),rseriesconv,linewidth=2) #,'marker','none'
    plt.title('Reflectivity')
    plt.grid()
    plt.ylim((0,tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-1.,1.))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)

    # panel 3: the (optionally noisy) synthetic trace
    plt.subplot(133)
    plt.plot(seis,tseis,color='black',linewidth=1)
    plt.title('Seismogram')
    plt.grid()
    plt.ylim((tseis.min(),tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-0.95,0.95))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)

    plt.tight_layout()
    plt.show()
def plotSeismogramV2(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):
    """
    Plotting function to show physical property logs (in depth) and the
    seismogram (in time) side by side.

    ``noise`` is the relative amplitude of band-limited random noise
    added to the trace (0 for a clean seismogram).
    """
    dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)
    tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)

    # band-limit white noise with a triangular smoothing filter
    noise = noise*np.max(np.abs(seis))*np.random.randn(seis.size)
    filt  = np.arange(1.,21.)
    filtr = filt[::-1]
    filt  = np.append(filt,filtr[1:])*1./21.
    noise = np.convolve(noise,filt)
    noise = noise[0:seis.size]

    # shared axis limits, same as plotLogs for visual comparability
    xlimrho = (1.95,5.05)
    xlimv   = (0.25,4.05)
    xlimz   = (xlimrho[0]*xlimv[0], xlimrho[1]*xlimv[1])

    seis = seis + noise

    plt.figure()

    # panel 1: density log in depth
    plt.subplot(131)
    plotLogFormat(rholog*10**-3,dpth,xlimrho,'blue')
    plt.title('$\\rho$')
    plt.xlabel('Density \n $\\times 10^3$ (kg /m$^3$)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)

    # panel 2: velocity log in depth
    plt.subplot(132)
    plotLogFormat(vlog*10**-3,dpth,xlimv,'red')
    plt.title('$v$')
    plt.xlabel('Velocity \n $\\times 10^3$ (m/s)',fontsize=9)
    plt.ylabel('Depth (m)',fontsize=9)

    # panel 3: the (optionally noisy) synthetic trace in time
    plt.subplot(133)
    plt.plot(seis,tseis,color='black',linewidth=1)
    plt.title('Seismogram')
    plt.grid()
    plt.ylim((tseis.min(),tseis.max()))
    plt.gca().invert_yaxis()
    plt.xlim((-0.5,0.5))
    plt.setp(plt.xticks()[1],rotation='90',fontsize=9)
    plt.setp(plt.yticks()[1],fontsize=9)
    plt.gca().set_xlabel('Amplitude',fontsize=9)
    plt.gca().set_ylabel('Time (s)',fontsize=9)

    plt.tight_layout()
    plt.show()
## INTERACTIVE PLOT WRAPPERS
def plotLogsInteract(d2, d3, rho1, rho2, rho3, v1, v2, v3, usingT=False):
    """
    Interactive wrapper of plotLogs: assembles the three-layer model from
    scalar widget values and forwards it.
    """
    depths = np.array((0., d2, d3), dtype=float)
    densities = np.array((rho1, rho2, rho3), dtype=float)
    velocities = np.array((v1, v2, v3), dtype=float)
    plotLogs(depths, densities, velocities, usingT)
def plotTimeDepthInteract(d2, d3, v1, v2, v3):
    """
    Interactive wrapper for plotTimeDepth: three-layer model from
    scalar widget values.
    """
    plotTimeDepth(np.array((0., d2, d3), dtype=float),
                  np.array((v1, v2, v3), dtype=float))
def plotSeismogramInteractFixMod(wavf,wavA):
    """
    Interactive wrapper for plotSeismogram with a fixed three-layer
    geologic model; only the wavelet frequency and amplitude vary.
    """
    d      = [0., 50., 100.]      # Position of top of each layer (m)
    v      = [500.,  1000., 1500.]  # Velocity of each layer (m/s)
    rho    = [2000., 2300., 2500.] # Density of each layer (kg/m^3)
    wavf   = np.array(wavf, dtype=float)
    usingT = True
    plotSeismogram(d, rho, v, wavf, wavA, 0., usingT)
def plotSeismogramInteract(d2,d3,rho1,rho2,rho3,v1,v2,v3,wavf,wavA,AddNoise=False,usingT=True):
    """
    Interactive wrapper for plotSeismogramV2.

    NOTE(review): only the layer depths d2/d3 are taken from the widgets;
    the rho1..rho3 and v1..v3 arguments are accepted but ignored -- the
    velocity and density models are hard-coded below. Confirm whether
    that is intentional for this notebook widget.
    """
    d = np.array((0.,d2,d3), dtype=float)
    v = [500., 1000., 1500.]    # Velocity of each layer (m/s)
    rho = [2000., 2300., 2500.] # Density of each layer (kg/m^3)

    if AddNoise:
        noise = 0.02
    else:
        noise = 0.

    plotSeismogramV2(d, rho, v, wavf, wavA, noise,usingT)
def plotSeismogramInteractRes(h2,wavf,AddNoise=False):
    """
    Interactive wrapper for plotSeismogramV2 exploring vertical
    resolution: h2 is the thickness of the middle layer.

    NOTE(review): the local ``usingT`` below is assigned but never passed
    to plotSeismogramV2 (which defaults to True anyway) -- confirm intent.
    """
    d   = [0., 50., 50.+h2]       # Position of top of each layer (m)
    v   = [500.,  1000., 1500.]   # Velocity of each layer (m/s)
    rho = [2000., 2300., 2500.]   # Density of each layer (kg/m^3)
    wavf = np.array(wavf, dtype=float)
    usingT = True

    if AddNoise:
        noise = 0.02
    else:
        noise = 0.

    plotSeismogramV2(d, rho, v, wavf, 1., noise)
if __name__ == '__main__':
d = [0., 50., 100.] # Position of top of each layer (m)
v = [500., 1000., 1500.] # Velocity of each layer (m/s)
rho = [2000., 2300., 2500.] # Density of each layer (kg/m^3)
wavtyp = 'RICKER' # Wavelet type
wavf = 50. # Wavelet Frequency
usingT = False # Use Transmission Coefficients?
#plotLogsInteract(d[1],d[2],rho[0],rho[1],rho[2],v[0],v[1],v[2])
#plotTimeDepth(d,v)
#plotSeismogram(d, rho, v, wavtyp, wavf, usingT)
#plotSeismogramV2(d, rho, v, 50., wavA=1., noise = 0., usingT=True, wavtyp='RICKER')
| mit |
toobaz/pandas | pandas/tests/indexing/test_floats.py | 1 | 30613 | import numpy as np
import pytest
from pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
class TestFloatIndexers:
    def check(self, result, original, indexer, getitem):
        """
        Comparator for results.

        We need to take care whether we are indexing on a Series or a
        DataFrame: for a DataFrame reached through ``__getitem__`` the
        indexer selects columns, otherwise it selects rows.
        """
        if isinstance(original, Series):
            expected = original.iloc[indexer]
        else:
            if getitem:
                # frame __getitem__ selects columns
                expected = original.iloc[:, indexer]
            else:
                expected = original.iloc[indexer]

        assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is specifically testing for the error
# message
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
tm.makeIntIndex,
tm.makeRangeIndex,
]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
msg = "Cannot index by location index"
with pytest.raises(TypeError, match=msg):
s.iloc[3.0]
msg = (
"cannot do positional indexing on {klass} with these "
r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]:
i = index(5)
for s in [
Series(np.arange(len(i)), index=i),
DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
]:
# getting
for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
# gettitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
msg = r"^3(\.0)?$"
else:
error = TypeError
msg = (
r"cannot do (label|index|positional) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}|"
"Cannot index by location index with a"
" non-integer key".format(klass=type(i), kind=str(float))
)
with pytest.raises(error, match=msg):
idxr(s)[3.0]
# label based can be a TypeError or KeyError
if s.index.inferred_type in ["string", "unicode", "mixed"]:
error = KeyError
msg = r"^3$"
else:
error = TypeError
msg = (
r"cannot do (label|index) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(error, match=msg):
s.loc[3.0]
# contains
assert 3.0 not in s
# setting with a float fails with iloc
msg = (
r"cannot do (label|index|positional) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
# setting with an indexer
if s.index.inferred_type in ["categorical"]:
# Value or Type Error
pass
elif s.index.inferred_type in ["datetime64", "timedelta64", "period"]:
# these should prob work
# and are inconsisten between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
#
# with pytest.raises(TypeError):
# idxr(s2)[3.0] = 0
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x]:
s2 = s.copy()
idxr(s2)[3.0] = 0
assert s2.index.is_object()
# fallsback to position selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
r"cannot do (label|index) indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=type(i), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[3.0]
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=["a", "b", "c"])
s3 = Series([1, 2, 3], index=["a", "b", 1.5])
# lookup in a pure stringstr
# with an invalid indexer
for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
r"cannot do label indexing"
r" on {klass} with these indexers \[1\.0\] of"
r" {kind}|"
"Cannot index by location index with a non-integer key".format(
klass=str(Index), kind=str(float)
)
)
with pytest.raises(TypeError, match=msg):
idxr(s2)[1.0]
with pytest.raises(KeyError, match=r"^1$"):
s2.loc[1.0]
result = s2.loc["b"]
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x]:
msg = (
r"cannot do label indexing"
r" on {klass} with these indexers \[1\.0\] of"
r" {kind}".format(klass=str(Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
result = idxr(s3)[1]
expected = 2
assert result == expected
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s3.iloc[1.0]
with pytest.raises(KeyError, match=r"^1$"):
s3.loc[1.0]
result = s3.loc[1.5]
expected = 3
assert result == expected
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for i in [Int64Index(range(5)), RangeIndex(5)]:
for s in [
Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100, index=range(len(s)), name=3)
else:
expected = Series(100.0, index=range(len(s)), name=3)
s2 = s.copy()
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.0))
for s in [
Series(np.arange(len(index)), index=index),
DataFrame(
np.random.randn(len(index), len(index)), index=index, columns=index
),
]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
# getting
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
with pytest.raises(KeyError, match=r"^3\.5$"):
idxr(s)[3.5]
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s.iloc[3.0]
msg = (
r"cannot do positional indexing"
r" on {klass} with these indexers \[3\.0\] of"
r" {kind}".format(klass=str(Float64Index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
]:
index = index(5)
for s in [
Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index),
]:
# getitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[l]
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers"
r" \[(3|4)(\.0)?\]"
r" of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers"
r" \[(3|4)(\.0)?\]"
r" of ({kind_float}|{kind_int})".format(
klass=type(index),
kind_float=str(float),
kind_int=str(int),
)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l] = 0
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
# oob indicates if we are out of bounds
# of positional indexing
for index, oob in [
(Int64Index(range(5)), False),
(RangeIndex(5), False),
(Int64Index(range(5)) + 10, True),
]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# getitem out-of-bounds
for l in [slice(-6, 6), slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[-6\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[slice(-6.0, 6.0)]
# getitem odd floats
for l, res1 in [
(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4)),
]:
for idxr in [lambda x: x.loc]:
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(2|3)\.5\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x, lambda x: x.iloc]:
for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]:
klass = RangeIndex
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(2|4)\.0\] of"
" {kind}".format(klass=str(klass), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [Int64Index(range(5)), RangeIndex(5)]:
s = DataFrame(np.random.randn(5, 2), index=index)
def f(idxr):
# getitem
for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(0|1)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# getitem out-of-bounds
for l in [slice(-10, 10), slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[-10\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[slice(-10.0, 10.0)]
# getitem odd floats
for l, res in [
(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2)),
]:
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[0\.5\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l]
# setitem
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
msg = (
"cannot do slice indexing"
r" on {klass} with these indexers \[(3|4)\.0\] of"
" {kind}".format(klass=type(index), kind=str(float))
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
f(lambda x: x.loc)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.0)) + 0.1
for s in [
Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index),
]:
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.loc, lambda x: x]:
# getitem
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
# value not found (and no fallbacking at all)
# scalar integers
with pytest.raises(KeyError, match=r"^4\.0$"):
s.loc[4]
with pytest.raises(KeyError, match=r"^4\.0$"):
s.loc[4]
with pytest.raises(KeyError, match=r"^4\.0$"):
s[4]
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype="int64"))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([np.nan, 2, 4], index=[1.6, 5, 10]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name="foo")
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name="foo")
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name="foo")
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {
256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0,
}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_tranform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_tranform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
treycausey/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 8 | 1784 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
pl.axis('tight')
pl.show()
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate decision function output for X relative to y_true.

        Parameters
        ----------
        clf : object
            Trained classifier to use for scoring. Must have either a
            decision_function method or a predict_proba method; the output of
            that is used to compute the score.

        X : array-like or sparse matrix
            Test data that will be fed to clf.decision_function or
            clf.predict_proba.

        y : array-like
            Gold standard target values for X. These must be class labels,
            not decision function values.

        sample_weight : array-like, optional (default=None)
            Sample weights.

        Returns
        -------
        score : float
            Score function applied to prediction of estimator on X.
        """
        y_type = type_of_target(y)
        if y_type not in ("binary", "multilabel-indicator"):
            raise ValueError("{0} format is not supported".format(y_type))

        if is_regressor(clf):
            y_pred = clf.predict(X)
        else:
            try:
                y_pred = clf.decision_function(X)

                # For multi-output multi-class estimator. np.vstack requires
                # a sequence; passing a generator is deprecated in NumPy and
                # an error in recent releases, so build a list instead.
                if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T

            except (NotImplementedError, AttributeError):
                y_pred = clf.predict_proba(X)

                if y_type == "binary":
                    # Keep only the probability of the positive class.
                    y_pred = y_pred[:, 1]
                elif isinstance(y_pred, list):
                    y_pred = np.vstack([p[:, -1] for p in y_pred]).T

        # sample_weight is only forwarded when given, since not every metric
        # accepts a sample_weight keyword argument.
        if sample_weight is not None:
            return self._sign * self._score_func(y, y_pred,
                                                 sample_weight=sample_weight,
                                                 **self._kwargs)
        else:
            return self._sign * self._score_func(y, y_pred, **self._kwargs)

    def _factory_args(self):
        return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve ``scoring`` to a scorer callable.

    String names are looked up in the SCORERS registry; anything else is
    assumed to already be a scorer callable and returned unchanged.
    """
    if not isinstance(scoring, six.string_types):
        return scoring
    try:
        return SCORERS[scoring]
    except KeyError:
        raise ValueError('%r is not a valid scoring value. '
                         'Valid options are %s'
                         % (scoring, sorted(SCORERS.keys())))
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
    """Determine scorer from user options.

    A TypeError will be thrown if the estimator cannot be scored.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    allow_none : boolean, optional, default: False
        If no scoring is specified and the estimator has no score function, we
        can either return None or raise an exception.

    Returns
    -------
    scoring : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    """
    has_scoring = scoring is not None
    if not hasattr(estimator, 'fit'):
        # Anything without a fit method cannot be an estimator here.
        # (Fixed grammar of the original message: "should a be an".)
        raise TypeError("estimator should be an estimator implementing "
                        "'fit' method, %r was passed" % estimator)
    elif has_scoring:
        return get_scorer(scoring)
    elif hasattr(estimator, 'score'):
        # No explicit scoring requested: fall back to the estimator's own
        # score method via the passthrough scorer.
        return _passthrough_scorer
    elif allow_none:
        return None
    else:
        raise TypeError(
            "If no scoring is specified, the estimator passed should "
            "have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                needs_threshold=False, **kwargs):
    """Make a scorer from a performance metric or loss function.

    This factory function wraps scoring functions for use in GridSearchCV
    and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
    and returns a callable that scores an estimator's output.

    Read more in the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable,
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    greater_is_better : boolean, default=True
        Whether score_func is a score function (default), meaning high is good,
        or a loss function, meaning low is good. In the latter case, the
        scorer object will sign-flip the outcome of the score_func.

    needs_proba : boolean, default=False
        Whether score_func requires predict_proba to get probability estimates
        out of a classifier.

    needs_threshold : boolean, default=False
        Whether score_func takes a continuous decision certainty.
        This only works for binary classification using estimators that
        have either a decision_function or predict_proba method.

        For example ``average_precision`` or the area under the roc curve
        can not be computed using discrete predictions alone.

    **kwargs : additional arguments
        Additional parameters to be passed to score_func.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, beta=2)
    >>> from sklearn.grid_search import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
    ...                     scoring=ftwo_scorer)
    """
    # Loss functions are sign-flipped so higher scores are always better.
    sign = 1 if greater_is_better else -1
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True,"
                         " but not both.")
    # Pick the scorer class matching the kind of estimator output needed.
    if needs_proba:
        scorer_cls = _ProbaScorer
    elif needs_threshold:
        scorer_cls = _ThresholdScorer
    else:
        scorer_cls = _PredictScorer
    return scorer_cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
# Error metrics are losses: wrapped with greater_is_better=False so that
# model selection can always maximize the scorer's value.
mean_squared_error_scorer = make_scorer(mean_squared_error,
                                        greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
                                         greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
                                           greater_is_better=False)

# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)

# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
                             needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
                                       needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)

# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                              needs_proba=True)

# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)

# Registry of scorers addressable by name (used by get_scorer/check_scoring).
SCORERS = dict(r2=r2_scorer,
               median_absolute_error=median_absolute_error_scorer,
               mean_absolute_error=mean_absolute_error_scorer,
               mean_squared_error=mean_squared_error_scorer,
               accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
               average_precision=average_precision_scorer,
               log_loss=log_loss_scorer,
               adjusted_rand_score=adjusted_rand_scorer)

# Register 'precision', 'recall' and 'f1' plus their averaged variants
# (e.g. 'f1_macro', 'precision_weighted') for multiclass/multilabel use.
for name, metric in [('precision', precision_score),
                     ('recall', recall_score), ('f1', f1_score)]:
    SCORERS[name] = make_scorer(metric)
    for average in ['macro', 'micro', 'samples', 'weighted']:
        qualified_name = '{0}_{1}'.format(name, average)
        SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
                                                      average=average))
| bsd-3-clause |
ioam/holoviews | holoviews/plotting/mpl/annotation.py | 2 | 7744 | from __future__ import absolute_import, division, unicode_literals
import param
import numpy as np
import matplotlib
from matplotlib import patches as patches
from ...core.util import match_spec, basestring
from ...core.options import abbreviated_exception
from .element import ElementPlot, ColorbarPlot
from .plot import mpl_rc_context
class AnnotationPlot(ElementPlot):
    """
    AnnotationPlot handles the display of all annotation elements.
    """

    show_legend = param.Boolean(default=False, doc="""
        Whether to show legend for the plot.""")

    def __init__(self, annotation, **params):
        # Keep a reference to the annotation element and track the matplotlib
        # artists created for it so they can be removed on later updates.
        self._annotation = annotation
        super(AnnotationPlot, self).__init__(annotation, **params)
        self.handles['annotations'] = []

    @mpl_rc_context
    def initialize_plot(self, ranges=None):
        # Draw the annotation for the most recent key in the HoloMap and
        # record the returned artists for removal in update_handles.
        annotation = self.hmap.last
        key = self.keys[-1]
        ranges = self.compute_ranges(self.hmap, key, ranges)
        ranges = match_spec(annotation, ranges)
        axis = self.handles['axis']
        opts = self.style[self.cyclic_index]
        with abbreviated_exception():
            handles = self.draw_annotation(axis, annotation.data, opts)
        self.handles['annotations'] = handles
        return self._finalize_axis(key, element=annotation, ranges=ranges)

    def update_handles(self, key, axis, annotation, ranges, style):
        # Clear all existing annotations
        for element in self.handles['annotations']:
            element.remove()

        # Redraw from scratch; subclasses implement draw_annotation.
        with abbreviated_exception():
            self.handles['annotations'] = self.draw_annotation(axis, annotation.data, style)
class VLinePlot(AnnotationPlot):
    "Draw a vertical line on the axis"

    style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']

    def draw_annotation(self, axis, position, opts):
        # When the axes are inverted the vertical line must be drawn
        # as a horizontal one and vice versa.
        draw_line = axis.axhline if self.invert_axes else axis.axvline
        return [draw_line(position, **opts)]
class HLinePlot(AnnotationPlot):
    "Draw a horizontal line on the axis"

    style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']

    def draw_annotation(self, axis, position, opts):
        "Draw a horizontal line on the axis"
        # Inverted axes swap the roles of axhline and axvline.
        draw_line = axis.axvline if self.invert_axes else axis.axhline
        return [draw_line(position, **opts)]
class TextPlot(AnnotationPlot):
    "Draw the Text annotation object"

    style_opts = ['alpha', 'color', 'family', 'weight', 'visible']

    def draw_annotation(self, axis, data, opts):
        """Draw the text artist described by ``data`` on ``axis``.

        ``data`` is the Text element tuple of position, string, fontsize
        and alignment/rotation settings.
        """
        (x, y, text, fontsize,
         horizontalalignment, verticalalignment, rotation) = data
        if self.invert_axes: x, y = y, x
        # Copy before injecting the fontsize so the caller's shared style
        # dictionary is not mutated across successive draws.
        opts = dict(opts, fontsize=fontsize)
        return [axis.text(x, y, text,
                          horizontalalignment=horizontalalignment,
                          verticalalignment=verticalalignment,
                          rotation=rotation, **opts)]
class LabelsPlot(ColorbarPlot):
    """Renders one text artist per (x, y, text) row of a Labels element,
    optionally colormapped by an additional value dimension."""

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
      Index of the dimension from which the color will the drawn""")

    xoffset = param.Number(default=None, doc="""
      Amount of offset to apply to labels along x-axis.""")

    yoffset = param.Number(default=None, doc="""
      Amount of offset to apply to labels along y-axis.""")

    style_opts = ['alpha', 'color', 'family', 'weight', 'size', 'visible',
                  'horizontalalignment', 'verticalalignment', 'cmap', 'rotation']

    _nonvectorized_styles = ['cmap']

    _plot_methods = dict(single='annotate')

    def get_data(self, element, ranges, style):
        """Extract label positions, text and optional color values."""
        with abbreviated_exception():
            style = self._apply_transforms(element, ranges, style)
        xs, ys = (element.dimension_values(i) for i in range(2))
        tdim = element.get_dimension(2)
        text = [tdim.pprint_value(v) for v in element.dimension_values(tdim)]
        positions = (ys, xs) if self.invert_axes else (xs, ys)
        if self.xoffset is not None:
            xs += self.xoffset
        if self.yoffset is not None:
            ys += self.yoffset

        # Resolve color values either from the color_index dimension or an
        # explicit 'c' style override.
        cs = None
        cdim = element.get_dimension(self.color_index)
        if cdim:
            self._norm_kwargs(element, ranges, style, cdim)
            cs = element.dimension_values(cdim)
        if 'c' in style:
            cs = style.pop('c')

        if 'size' in style: style['fontsize'] = style.pop('size')
        if 'horizontalalignment' not in style: style['horizontalalignment'] = 'center'
        if 'verticalalignment' not in style: style['verticalalignment'] = 'center'
        return positions + (text, cs), style, {}

    def init_artists(self, ax, plot_args, plot_kwargs):
        """Create one matplotlib text artist per label, applying the
        colormap when color values were supplied."""
        if plot_args[-1] is not None:
            cmap = plot_kwargs.pop('cmap', None)
            colors = list(np.unique(plot_args[-1]))
            vmin, vmax = plot_kwargs.pop('vmin'), plot_kwargs.pop('vmax')
        else:
            cmap = None
            plot_args = plot_args[:-1]

        # Array-valued style options are indexed per label below.
        vectorized = {k: v for k, v in plot_kwargs.items() if isinstance(v, np.ndarray)}
        texts = []
        for i, item in enumerate(zip(*plot_args)):
            x, y, text = item[:3]
            if len(item) == 4 and cmap is not None:
                color = item[3]
                if plot_args[-1].dtype.kind in 'if':
                    # Numeric values are normalized into the colormap range.
                    color = (color - vmin) / (vmax-vmin)
                    plot_kwargs['color'] = cmap(color)
                else:
                    # Categorical values map to their index among the unique
                    # values; np.nan (np.NaN was removed in NumPy 2.0) marks
                    # values outside that set.
                    color = colors.index(color) if color in colors else np.nan
                    plot_kwargs['color'] = cmap(color)
            kwargs = dict(plot_kwargs, **{k: v[i] for k, v in vectorized.items()})
            texts.append(ax.text(x, y, text, **kwargs))
        return {'artist': texts}

    def teardown_handles(self):
        # Remove all existing text artists before a redraw.
        if 'artist' in self.handles:
            for artist in self.handles['artist']:
                artist.remove()
class ArrowPlot(AnnotationPlot):
    "Draw an arrow using the information supplied to the Arrow annotation"

    _arrow_style_opts = ['alpha', 'color', 'lw', 'linewidth', 'visible']
    _text_style_opts = TextPlot.style_opts + ['textsize', 'fontsize']

    style_opts = sorted(set(_arrow_style_opts + _text_style_opts))

    def draw_annotation(self, axis, data, opts):
        """Annotate the axis with an arrow and its accompanying text."""
        x, y, text, direction, points, arrowstyle = data
        if self.invert_axes: x, y = y, x
        direction = direction.lower()
        # Split the supplied style options between the arrow patch and
        # the text artist.
        arrow_opts = {k: v for k, v in opts.items()
                      if k in self._arrow_style_opts}
        arrowprops = dict({'arrowstyle': arrowstyle}, **arrow_opts)
        textopts = {k: v for k, v in opts.items()
                    if k in self._text_style_opts}
        # Offset the text from the arrow head by the requested distance.
        if direction in ['v', '^']:
            xytext = (0, points) if direction == 'v' else (0, -points)
        elif direction in ['>', '<']:
            xytext = (points, 0) if direction == '<' else (-points, 0)
        if 'textsize' in textopts:
            textopts['fontsize'] = textopts.pop('textsize')
        return [axis.annotate(text, xy=(x, y), textcoords='offset points',
                              xytext=xytext, ha="center", va="center",
                              arrowprops=arrowprops, **textopts)]
class SplinePlot(AnnotationPlot):
    "Draw the supplied Spline annotation (see Spline docstring)"

    style_opts = ['alpha', 'edgecolor', 'linewidth', 'linestyle', 'visible']

    def draw_annotation(self, axis, data, opts):
        verts, codes = data
        # Nothing to draw for an empty set of vertices.
        if not len(verts):
            return []
        spline_path = matplotlib.path.Path(verts, codes)
        patch = patches.PathPatch(spline_path, facecolor='none', **opts)
        axis.add_patch(patch)
        return [patch]
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 39 | 20233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
  """Scale rows of x to unit Euclidean norm along the last axis."""
  norms = np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
  return x / norms


def cosine_similarity(x, y):
  """Pairwise cosine similarity between the rows of x and the rows of y."""
  return normalize(x).dot(normalize(y).T)
def make_random_centers(num_centers, num_dims, center_norm=500):
  """Draw integer-valued float32 cluster centers uniformly in [0, center_norm]."""
  raw = np.random.rand(num_centers, num_dims).astype(np.float32)
  return np.round(raw * center_norm)
def make_random_points(centers, num_points, max_offset=20):
  """Sample points around the given centers.

  Returns the points, their assigned center indices and the squared
  distance of each point from its center.
  """
  num_centers, num_dims = centers.shape
  assignments = np.random.choice(num_centers, num_points)
  offsets = np.round(
      np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
  points = centers[assignments] + offsets
  squared_distances = np.add.reduce(offsets * offsets, 1)
  return (points, assignments, squared_distances)
class KMeansTestBase(test.TestCase):
  """Shared input pipeline and configuration helpers for the KMeans tests."""

  def input_fn(self,
               batch_size=None,
               points=None,
               randomize=None,
               num_epochs=None):
    """Returns an input_fn that randomly selects batches from given points."""
    batch_size = batch_size or self.batch_size
    points = points if points is not None else self.points
    num_points = points.shape[0]
    # Randomize batches only for mini-batch training with a single step per
    # iteration; otherwise batches are cycled through sequentially.
    if randomize is None:
      randomize = (self.use_mini_batch and
                   self.mini_batch_steps_per_iteration <= 1)

    def _fn():
      x = constant_op.constant(points)
      # Full-batch case: feed the entire dataset each step.
      if batch_size == num_points:
        return input_lib.limit_epochs(x, num_epochs=num_epochs), None
      if randomize:
        indices = random_ops.random_uniform(
            constant_op.constant([batch_size]),
            minval=0,
            maxval=num_points - 1,
            dtype=dtypes.int32,
            seed=10)
      else:
        # We need to cycle through the indices sequentially. We create a queue
        # to maintain the list of indices.
        q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())

        # Conditionally initialize the Queue.
        def _init_q():
          with ops.control_dependencies(
              [q.enqueue_many(math_ops.range(num_points))]):
            return control_flow_ops.no_op()

        init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
                                       control_flow_ops.no_op)
        with ops.control_dependencies([init_q]):
          offsets = q.dequeue_many(batch_size)
          # Re-enqueue the dequeued indices so the cycle repeats.
          with ops.control_dependencies([q.enqueue_many(offsets)]):
            indices = array_ops.identity(offsets)
      batch = array_ops.gather(x, indices)
      return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)

    return _fn

  @staticmethod
  def config(tf_random_seed):
    # A fixed graph-level seed keeps individual tests deterministic.
    return run_config.RunConfig(tf_random_seed=tf_random_seed)

  @property
  def initial_clusters(self):
    # Default initialization strategy; subclasses may override.
    return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT

  @property
  def batch_size(self):
    # Full-batch by default; mini-batch subclasses override this.
    return self.num_points

  @property
  def use_mini_batch(self):
    return False

  @property
  def mini_batch_steps_per_iteration(self):
    return 1
class KMeansTest(KMeansTestBase):
  """End-to-end tests of KMeansClustering on well-separated synthetic data."""

  def setUp(self):
    # Fixed seed makes the synthetic dataset reproducible across runs.
    np.random.seed(3)
    self.num_centers = 5
    self.num_dims = 2
    self.num_points = 1000
    self.true_centers = make_random_centers(self.num_centers, self.num_dims)
    self.points, _, self.scores = make_random_points(self.true_centers,
                                                     self.num_points)
    # Sum of squared distances to the true centers; the ideal final score.
    self.true_score = np.add.reduce(self.scores)

  def _kmeans(self, relative_tolerance=None):
    # Builds an estimator configured by the (possibly overridden) properties.
    return kmeans_lib.KMeansClustering(
        self.num_centers,
        initial_clusters=self.initial_clusters,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        random_seed=24,
        relative_tolerance=relative_tolerance)

  def test_clusters(self):
    # After one step the cluster centers should have the expected shape.
    kmeans = self._kmeans()
    kmeans.fit(input_fn=self.input_fn(), steps=1)
    clusters = kmeans.clusters()
    self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])

  def test_fit(self):
    kmeans = self._kmeans()
    kmeans.fit(input_fn=self.input_fn(), steps=1)
    score1 = kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    steps = 10 * self.num_points // self.batch_size
    kmeans.fit(input_fn=self.input_fn(), steps=steps)
    score2 = kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    # More training must strictly decrease the sum of squared distances and
    # approach the score of the true centers.
    self.assertTrue(score1 > score2)
    self.assertNear(self.true_score, score2, self.true_score * 0.05)

  def test_monitor(self):
    if self.use_mini_batch:
      # We don't test for use_mini_batch case since the loss value can be noisy.
      return
    kmeans = kmeans_lib.KMeansClustering(
        self.num_centers,
        initial_clusters=self.initial_clusters,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        config=learn.RunConfig(tf_random_seed=14),
        random_seed=12,
        relative_tolerance=1e-4)

    kmeans.fit(
        input_fn=self.input_fn(),
        # Force it to train until the relative tolerance monitor stops it.
        steps=None)
    score = kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    self.assertNear(self.true_score, score, self.true_score * 0.01)

  def _infer_helper(self, kmeans, clusters, num_points):
    # Validates predict, score and transform against numpy reference values.
    points, true_assignments, true_offsets = make_random_points(
        clusters, num_points)
    # Test predict
    assignments = list(
        kmeans.predict_cluster_idx(input_fn=self.input_fn(
            batch_size=num_points, points=points, num_epochs=1)))
    self.assertAllEqual(assignments, true_assignments)

    # Test score
    score = kmeans.score(
        input_fn=lambda: (constant_op.constant(points), None), steps=1)
    self.assertNear(score, np.sum(true_offsets), 0.01 * score)

    # Test transform: squared distances computed via the expansion
    # ||p - c||^2 = ||p||^2 - 2 p.c + ||c||^2 (clipped at zero).
    transform = kmeans.transform(
        input_fn=lambda: (constant_op.constant(points), None))
    true_transform = np.maximum(
        0,
        np.sum(np.square(points), axis=1,
               keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
        np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
    self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)

  def test_infer(self):
    kmeans = self._kmeans()
    # Make a call to fit to initialize the cluster centers.
    max_steps = 1
    kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
    clusters = kmeans.clusters()

    # Run inference on small datasets.
    self._infer_helper(kmeans, clusters, num_points=10)
    self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
  """Tests of cluster initialization when data arrives in small batches."""

  def test_random(self):
    # With RANDOM_INIT and one point per batch, the first num_clusters
    # distinct points become the initial centers, which a single step
    # cannot move.
    points = np.array(
        [[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
    kmeans = kmeans_lib.KMeansClustering(
        num_clusters=points.shape[0],
        initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=True,
        mini_batch_steps_per_iteration=100,
        random_seed=24,
        relative_tolerance=None)
    kmeans.fit(
        input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
        steps=1)
    clusters = kmeans.clusters()
    self.assertAllEqual(points, clusters)

  def test_kmeans_plus_plus_batch_just_right(self):
    # A one-point batch is exactly enough for k-means++ with one cluster.
    points = np.array([[1, 2]], dtype=np.float32)
    kmeans = kmeans_lib.KMeansClustering(
        num_clusters=points.shape[0],
        initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=True,
        mini_batch_steps_per_iteration=100,
        random_seed=24,
        relative_tolerance=None)
    kmeans.fit(
        input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
        steps=1)
    clusters = kmeans.clusters()
    self.assertAllEqual(points, clusters)

  def test_kmeans_plus_plus_batch_too_small(self):
    # k-means++ requires at least num_clusters points in the first batch,
    # so a 4-point batch with 5 clusters must fail.
    points = np.array(
        [[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
    kmeans = kmeans_lib.KMeansClustering(
        num_clusters=points.shape[0],
        initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=True,
        mini_batch_steps_per_iteration=100,
        random_seed=24,
        relative_tolerance=None)
    with self.assertRaisesOpError(AssertionError):
      kmeans.fit(
          input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
          steps=1)
class MiniBatchKMeansTest(KMeansTest):
  """Reruns the KMeansTest suite with mini-batch training enabled."""

  @property
  def batch_size(self):
    return 50

  @property
  def use_mini_batch(self):
    return True
class FullBatchAsyncKMeansTest(KMeansTest):
  """Reruns the KMeansTest suite emulating full-batch updates by accumulating
  mini-batch steps over a whole epoch per iteration."""

  @property
  def batch_size(self):
    return 50

  @property
  def use_mini_batch(self):
    return True

  @property
  def mini_batch_steps_per_iteration(self):
    # One full pass over the data per cluster-center update.
    return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
  """Tests KMeansClustering with the cosine distance metric on a small
  hand-constructed dataset with two angular clusters."""

  def setUp(self):
    self.points = np.array(
        [[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
         [0.1, 3], [0.2, 4]],
        dtype=np.float32)
    self.num_points = self.points.shape[0]
    # The true centers are the normalized means of the normalized points of
    # each angular cluster (first four points vs. last four).
    self.true_centers = np.array(
        [
            normalize(
                np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
                    0],
            normalize(
                np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
                    0]
        ],
        dtype=np.float32)
    self.true_assignments = np.array([0] * 4 + [1] * 4)
    # Total cosine distance = N - sum of cosine similarities to own center.
    self.true_score = len(self.points) - np.tensordot(
        normalize(self.points), self.true_centers[self.true_assignments])

    self.num_centers = 2
    self.kmeans = kmeans_lib.KMeansClustering(
        self.num_centers,
        initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
        distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        config=self.config(3))

  def test_fit(self):
    max_steps = 10 * self.num_points // self.batch_size
    self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
    centers = normalize(self.kmeans.clusters())
    # Sort both center sets so the comparison is order-independent.
    centers = centers[centers[:, 0].argsort()]
    true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
    self.assertAllClose(centers, true_centers, atol=0.04)

  def test_transform(self):
    self.kmeans.fit(input_fn=self.input_fn(), steps=10)
    centers = normalize(self.kmeans.clusters())
    true_transform = 1 - cosine_similarity(self.points, centers)
    transform = self.kmeans.transform(input_fn=self.input_fn(
        batch_size=self.num_points))
    self.assertAllClose(transform, true_transform, atol=1e-3)

  def test_predict(self):
    max_steps = 10 * self.num_points // self.batch_size
    self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
    centers = normalize(self.kmeans.clusters())

    assignments = list(
        self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
            num_epochs=1, batch_size=self.num_points)))
    # Compare assigned centers rather than raw indices, since cluster
    # numbering is arbitrary.
    self.assertAllClose(
        centers[assignments],
        self.true_centers[self.true_assignments],
        atol=1e-2)

    centers = centers[centers[:, 0].argsort()]
    true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
    self.assertAllClose(centers, true_centers, atol=0.04)
    score = self.kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    self.assertAllClose(score, self.true_score, atol=1e-2)

  def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
    # the less populated centers.
    points = np.array(
        [[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
         [-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
         [-3., -3.]],
        dtype=np.float32)
    true_centers = np.array(
        [
            normalize(
                np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
            normalize(
                np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
            normalize(
                np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
        ],
        dtype=np.float32)
    true_assignments = [0] * 2 + [1] * 2 + [2] * 8
    true_score = len(points) - np.tensordot(
        normalize(points), true_centers[true_assignments])

    kmeans = kmeans_lib.KMeansClustering(
        3,
        initial_clusters=self.initial_clusters,
        distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        config=self.config(3))
    kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)

    centers = normalize(kmeans.clusters())
    self.assertAllClose(
        sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)

    def _input_fn():
      return (input_lib.limit_epochs(
          constant_op.constant(points), num_epochs=1), None)

    assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
    self.assertAllClose(
        centers[assignments], true_centers[true_assignments], atol=1e-2)

    score = kmeans.score(
        input_fn=lambda: (constant_op.constant(points), None), steps=1)
    self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
  """Reruns the cosine-distance tests with mini-batch training."""

  @property
  def batch_size(self):
    return 2

  @property
  def use_mini_batch(self):
    return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
  """Reruns the cosine-distance tests emulating full-batch updates by
  accumulating mini-batch steps over a whole epoch per iteration."""

  @property
  def batch_size(self):
    return 2

  @property
  def use_mini_batch(self):
    return True

  @property
  def mini_batch_steps_per_iteration(self):
    # One full pass over the data per cluster-center update.
    return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
  """Base class for benchmarks."""

  def SetUp(self,
            dimension=50,
            num_clusters=50,
            points_per_cluster=10000,
            center_norm=500,
            cluster_width=20):
    # Generates a reproducible synthetic dataset and records its ideal score.
    np.random.seed(123456)
    self.num_clusters = num_clusters
    self.num_points = num_clusters * points_per_cluster
    self.centers = make_random_centers(
        self.num_clusters, dimension, center_norm=center_norm)
    self.points, _, scores = make_random_points(
        self.centers, self.num_points, max_offset=cluster_width)
    self.score = float(np.sum(scores))

  def _report(self, num_iters, start, end, scores):
    # Reports average wall time per iteration plus the fit scores achieved.
    print(scores)
    self.report_benchmark(
        iters=num_iters,
        wall_time=(end - start) / num_iters,
        extras={'true_sum_squared_distances': self.score,
                'fit_scores': scores})

  def _fit(self, num_iters=10):
    # Subclasses implement the actual fitting loop being benchmarked.
    pass

  def benchmark_01_2dim_5center_500point(self):
    self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
    self._fit()

  def benchmark_02_20dim_20center_10kpoint(self):
    self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint(self):
    self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint_unseparated(self):
    # Wide clusters overlap, making the problem harder to separate.
    self.SetUp(
        dimension=100,
        num_clusters=50,
        points_per_cluster=1000,
        cluster_width=250)
    self._fit()

  def benchmark_04_100dim_500center_500kpoint(self):
    self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
    self._fit(num_iters=4)

  def benchmark_05_100dim_500center_500kpoint_unseparated(self):
    self.SetUp(
        dimension=100,
        num_clusters=500,
        points_per_cluster=1000,
        cluster_width=250)
    self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
  """Benchmarks the TensorFlow KMeansClustering estimator."""

  def _fit(self, num_iters=10):
    scores = []
    start = time.time()
    for i in range(num_iters):
      print('Starting tensorflow KMeans: %d' % i)
      tf_kmeans = kmeans_lib.KMeansClustering(
          self.num_clusters,
          initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
          kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
          random_seed=i * 42,
          relative_tolerance=1e-6,
          config=run_config.RunConfig(tf_random_seed=3))
      tf_kmeans.fit(
          input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
      _ = tf_kmeans.clusters()
      scores.append(
          tf_kmeans.score(
              input_fn=lambda: (constant_op.constant(self.points), None),
              steps=1))
    self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
  """Benchmarks scikit-learn's KMeans as a reference implementation."""

  def _fit(self, num_iters=10):
    scores = []
    start = time.time()
    for i in range(num_iters):
      print('Starting sklearn KMeans: %d' % i)
      # Parameters mirror the TensorFlow benchmark for a fair comparison.
      sklearn_kmeans = SklearnKMeans(
          n_clusters=self.num_clusters,
          init='k-means++',
          max_iter=50,
          n_init=1,
          tol=1e-4,
          random_state=i * 42)
      sklearn_kmeans.fit(self.points)
      scores.append(sklearn_kmeans.inertia_)
    self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
  """Checks that queue-fed inputs do not deadlock cluster initialization."""

  def input_fn(self):

    def _fn():
      # Inputs come from a FIFOQueue driven by a QueueRunner rather than a
      # constant tensor.
      queue = data_flow_ops.FIFOQueue(
          capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
      enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
      queue_runner.add_queue_runner(
          queue_runner.QueueRunner(queue, [enqueue_op]))
      return queue.dequeue(), None

    return _fn

  # This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
  # is generated using a QueueRunner, one has to make sure that these runners
  # are started before the initialization.
  def test_queues(self):
    kmeans = kmeans_lib.KMeansClustering(5)
    kmeans.fit(input_fn=self.input_fn(), steps=1)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
carrillo/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data: 40 sorted points on [0, 5) with sine-curve targets.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to every fifth target so the regressors have something to smooth.
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model with three different kernels.
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# Look at the results.
plt.scatter(X, y, c='k', label='data')
# NOTE: plt.hold() was deprecated in matplotlib 2.0 and removed in 3.0;
# successive plot calls draw on the same axes by default, so it is omitted.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/mixture/plot_gmm.py | 18 | 3038 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts it number of state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Colors are cycled so any number of mixture components can be drawn.
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
                              'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter each mixture component's points and overlay its covariance
    ellipse (scaled to two standard deviations).

    Parameters
    ----------
    X : array, shape (n_samples, 2) -- the data.
    Y_ : array, shape (n_samples,) -- hard component assignments.
    means, covariances : per-component Gaussian parameters.
    index : int -- subplot row (0 or 1).
    title : str -- subplot title.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        # Eigendecomposition gives ellipse axis lengths and orientation.
        v, w = linalg.eigh(covar)
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component.  The angle must be
        # given by keyword: positional `angle` was deprecated in
        # matplotlib 3.6 and later removed.
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180. + angle,
                                  color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xlim(-10., 10.)
    plt.ylim(-3., 6.)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# Number of samples per component
n_samples = 500

# Generate random sample, two components: a correlated Gaussian around the
# origin and an isotropic one centred at (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
             'Gaussian Mixture')

# Fit a Dirichlet process Gaussian mixture using five components.
# mixture.DPGMM was deprecated in scikit-learn 0.18 and removed in 0.20;
# BayesianGaussianMixture with a Dirichlet-process prior is its replacement
# and exposes covariances via the public `covariances_` attribute.
dpgmm = mixture.BayesianGaussianMixture(
    n_components=5, covariance_type='full',
    weight_concentration_prior_type='dirichlet_process').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Dirichlet Process GMM')

plt.show()
| bsd-3-clause |
7630155/tushare | tushare/stock/shibor.py | 38 | 5010 | # -*- coding:utf-8 -*-
"""
上海银行间同业拆放利率(Shibor)数据接口
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
    """Fetch the Shanghai Interbank Offered Rate (Shibor) for one year.

    Parameters
    ----------
    year : int, optional
        Year to query; defaults to the current year.

    Returns
    -------
    pandas.DataFrame or None
        Columns: date, ON (overnight), 1W, 2W, 1M, 3M, 6M, 9M, 1Y lending
        rates.  None on any download/parsing failure.
    """
    year = du.get_year() if year is None else year
    lab = ct.SHIBOR_TYPE['Shibor']
    # The URL label must be pre-encoded as UTF-8 bytes under Python 3.
    lab = lab.encode('utf-8') if ct.PY3 else lab
    try:
        df = pd.read_excel(ct.SHIBOR_DATA_URL % (ct.P_TYPE['http'],
                                                 ct.DOMAINS['shibor'],
                                                 ct.PAGES['dw'], 'Shibor',
                                                 year, lab, year))
        df.columns = ct.SHIBOR_COLS
        df['date'] = df['date'].map(lambda x: x.date())
        df['date'] = df['date'].astype(np.datetime64)
        return df
    except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return None
def shibor_quote_data(year=None):
    """Fetch per-bank Shibor quote data for one year.

    Parameters
    ----------
    year : int, optional
        Year to query; defaults to the current year.

    Returns
    -------
    pandas.DataFrame or None
        Columns: date, bank (quoting bank name), then bid ("_B") and ask
        ("_A") quotes for each tenor (ON, 1W, 2W, 1M, 3M, 6M, 9M, 1Y).
        None on any download/parsing failure.
    """
    year = du.get_year() if year is None else year
    lab = ct.SHIBOR_TYPE['Quote']
    # The URL label must be pre-encoded as UTF-8 bytes under Python 3.
    lab = lab.encode('utf-8') if ct.PY3 else lab
    try:
        df = pd.read_excel(ct.SHIBOR_DATA_URL % (ct.P_TYPE['http'],
                                                 ct.DOMAINS['shibor'],
                                                 ct.PAGES['dw'], 'Quote',
                                                 year, lab, year),
                           skiprows=[0])
        df.columns = ct.QUOTE_COLS
        df['date'] = df['date'].map(lambda x: x.date())
        df['date'] = df['date'].astype(np.datetime64)
        return df
    except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return None
def shibor_ma_data(year=None):
    """Fetch Shibor moving-average (tendency) data for one year.

    Parameters
    ----------
    year : int, optional
        Year to query; defaults to the current year.

    Returns
    -------
    pandas.DataFrame or None
        Column `date` plus the 5-, 10- and 20-day moving averages for each
        tenor.  None on any download/parsing failure.
    """
    year = du.get_year() if year is None else year
    lab = ct.SHIBOR_TYPE['Tendency']
    # The URL label must be pre-encoded as UTF-8 bytes under Python 3.
    lab = lab.encode('utf-8') if ct.PY3 else lab
    try:
        df = pd.read_excel(ct.SHIBOR_DATA_URL % (ct.P_TYPE['http'],
                                                 ct.DOMAINS['shibor'],
                                                 ct.PAGES['dw'],
                                                 'Shibor_Tendency',
                                                 year, lab, year),
                           skiprows=[0])
        df.columns = ct.SHIBOR_MA_COLS
        df['date'] = df['date'].map(lambda x: x.date())
        df['date'] = df['date'].astype(np.datetime64)
        return df
    except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return None
def lpr_data(year=None):
    """Fetch the Loan Prime Rate (LPR) for one year.

    Parameters
    ----------
    year : int, optional
        Year to query; defaults to the current year.

    Returns
    -------
    pandas.DataFrame or None
        Columns: date, 1Y (one-year loan prime rate).  None on any
        download/parsing failure.
    """
    year = du.get_year() if year is None else year
    lab = ct.SHIBOR_TYPE['LPR']
    # The URL label must be pre-encoded as UTF-8 bytes under Python 3.
    lab = lab.encode('utf-8') if ct.PY3 else lab
    try:
        df = pd.read_excel(ct.SHIBOR_DATA_URL % (ct.P_TYPE['http'],
                                                 ct.DOMAINS['shibor'],
                                                 ct.PAGES['dw'], 'LPR',
                                                 year, lab, year))
        df.columns = ct.LPR_COLS
        df['date'] = df['date'].map(lambda x: x.date())
        df['date'] = df['date'].astype(np.datetime64)
        return df
    except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return None
def lpr_ma_data(year=None):
    """Fetch Loan Prime Rate moving-average data for one year.

    Parameters
    ----------
    year : int, optional
        Year to query; defaults to the current year.

    Returns
    -------
    pandas.DataFrame or None
        Columns: date, 1Y_5, 1Y_10, 1Y_20 (5/10/20-day moving averages of
        the one-year LPR).  None on any download/parsing failure.
    """
    year = du.get_year() if year is None else year
    lab = ct.SHIBOR_TYPE['LPR_Tendency']
    # The URL label must be pre-encoded as UTF-8 bytes under Python 3.
    lab = lab.encode('utf-8') if ct.PY3 else lab
    try:
        df = pd.read_excel(ct.SHIBOR_DATA_URL % (ct.P_TYPE['http'],
                                                 ct.DOMAINS['shibor'],
                                                 ct.PAGES['dw'],
                                                 'LPR_Tendency',
                                                 year, lab, year),
                           skiprows=[0])
        df.columns = ct.LPR_MA_COLS
        df['date'] = df['date'].map(lambda x: x.date())
        df['date'] = df['date'].astype(np.datetime64)
        return df
    except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return None
| bsd-3-clause |
georgetown-analytics/auto-sentiment | ingestion/database.py | 1 | 5186 | from pymongo import MongoClient
import pymongo
import pandas as pd
# This class creates an instance of our MongoDB
class twitterDB(object):
    """Wrapper around a local MongoDB instance used to store, hand-label
    and export tweets for sentiment analysis."""

    def __init__(self):
        # Connects to the default local MongoDB server.
        self.client = MongoClient()
        self.database = self.client.twitter_db

    def save_coll(self, elements, coll_name):
        """Saves a list of elements in the specified collection."""
        for element in elements:
            self.database[coll_name].insert(element)

    def save_element(self, element):
        """Saves a trimmed copy of one raw tweet in the current collection."""
        data = {}
        data['id'] = element['id']
        data['created_at'] = element['created_at']
        data['favorite_count'] = element['favorite_count']
        data['retweet_count'] = element['retweet_count']
        data['text'] = element['text']
        data['user_name'] = element['user']['screen_name']
        # Placeholders filled in later by the model / manual labelling.
        data['model_sentiment'] = {}
        data['user_sentiment'] = {}
        self.collection.insert(data)

    def load_coll(self, coll_name):
        """Makes the specified collection the current one."""
        self.collection = self.database[coll_name]
        self.collection_name = coll_name

    def load_tweets(self):
        """Returns all documents of the current collection as a list."""
        tweets = []
        docs = self.collection.find()
        for d in docs:
            tweets.append(d)
        return tweets

    def get_collection_names(self):
        """Returns the database's collection names."""
        return self.database.collection_names()

    def get_number_tweets(self):
        """Returns the number of documents in the current collection."""
        return self.collection.count()

    def calibrate_tweet(self):
        """Interactively asks the user to label every tweet in the current
        collection that has no 0/1 user sentiment yet."""
        tweets = self.collection.find({"user_sentiment": {"$nin": [0,1]}}).sort([("id", pymongo.ASCENDING)])
        num_tweets = tweets.count()
        print("You have %d tweets to classify." %num_tweets)
        i = 0
        working = 1
        while (working == 1 and i < num_tweets):
            t = tweets[i]
            t_id = t['id']
            print("Please read the following tweet.")
            print(t['text'].encode('utf-8'))
            print("Please tell if it is a positive (1) or a negative (0) tweet. Type 2 if you want to get out.")
            option = input("?")
            try:
                option = int(option)
                if (option >= 0 and option <= 1):
                    self.set_user_sentiment(t_id, option)
                    i += 1
                elif (option == 2):
                    # BUG FIX: the original wrote `working == 0` (a
                    # comparison whose result was discarded) instead of
                    # assigning, relying on `i = num_tweets` to exit.
                    working = 0
                    i = num_tweets
                else:
                    print("Option number not valid. Enter a 0 or a 1")
            except ValueError as e:
                print("Input %s is incorrect, please enter a number." % str(e))

    def get_tweets_with_word(self, word):
        """Bulk-labels every tweet of the current collection whose text
        contains `word` with one user-supplied sentiment (0 or 1)."""
        regex = ".*"+word+".*"
        tweets = self.collection.find({"text": {"$regex": regex}}).sort([("id", pymongo.ASCENDING)])
        if(tweets.count() > 0):
            print("There are %d tweets with this word." % tweets.count())
            working = 1
            while working == 1:
                print("Please tell if these tweets are positive (1) or negative (0).")
                option = input("?")
                try:
                    option = int(option)
                    if (option >= 0 and option <= 1):
                        for t in tweets:
                            tweet_id = t['id']
                            self.set_user_sentiment(tweet_id, option)
                        working = 0
                    else:
                        print("Option number not valid. Enter a 0 or a 1")
                except ValueError as e:
                    print("Input %s is incorrect, please enter a number." % str(e))
        else:
            print("No tweets found with this word.")

    def set_user_sentiment(self, id, option):
        """Stores the user-supplied sentiment (0/1) on the tweet with `id`."""
        result = self.collection.update_one(
            {"id" : id},
            {
                "$set": {
                    "user_sentiment": option
                }
            }
        )

    def export_csv_user_sentiment(self):
        """Exports (text, user_sentiment) of all labelled tweets to CSV."""
        file = self.collection_name+"-sentiment.csv"
        tweets = self.collection.find({
            "user_sentiment": {"$in": [0,1]}
        },{
            "text": 1,
            "user_sentiment": 1,
            "_id": 0
        }
        )
        if(tweets.count() > 1):
            df = pd.DataFrame(list(tweets))
            df.drop_duplicates()
            df.to_csv(file, encoding = 'utf-8', index = False)
        else:
            print("There were no classified tweets in colection %s" %self.collection_name)

    def export_csv_collection(self):
        """Exports the whole current collection to a raw CSV file."""
        file = self.collection_name+"-raw.csv"
        tweets = self.collection.find()
        if(tweets.count() > 1):
            df = pd.DataFrame(list(tweets))
            df.to_csv(file, encoding = 'utf-8', index = False)
        else:
            print("There were no tweets in colection %s" %self.collection_name)
| mit |
madjelan/CostSensitiveClassification | setup.py | 1 | 1561 | #!/usr/bin/env python
from setuptools import setup, find_packages
import re
# Extract __version__ from the package source without importing it
# (importing would require the package's dependencies to be installed).
with open('costcla/__init__.py') as init_file:
    for line in init_file:
        match = re.match("__version__ *= *'(.*)'", line)
        if match:
            __version__, = match.groups()
            break

# Read the long description up front so the file handle is closed promptly
# (the original leaked both this handle and the __init__.py one).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(name='costcla',
      version=__version__,
      description='costcla is a Python module for cost-sensitive machine learning (classification) ',
      long_description=long_description,
      author='Alejandro CORREA BAHNSEN',
      author_email='al.bahnsen@gmail.com',
      url='https://github.com/albahnsen/CostSensitiveClassification',
      license='new BSD',
      packages=find_packages(),
      include_package_data = True,
      keywords=['machine learning', 'classification', 'cost-sensitive'],
      install_requires=['scikit-learn>=0.15.0b2','pandas>=0.14.0','numpy>=1.8.0', 'pyea>=0.1'],
      classifiers=['Intended Audience :: Science/Research',
                   'Intended Audience :: Developers',
                   'Programming Language :: Python',
                   'Topic :: Software Development',
                   'Topic :: Scientific/Engineering',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: 3.4',
                   'Topic :: Scientific/Engineering :: Artificial Intelligence',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',],
      )
| bsd-3-clause |
jdavidrcamacho/Tests_GP | MSc_results/speed_test5.py | 2 | 9388 | # -*- coding: utf-8 -*-
import Gedi as gedi
import george
import numpy as np#;np.random.seed(1234567)
import matplotlib.pylab as pl; pl.close('all')
from time import time,sleep
import scipy.optimize as op
import sys
##### INITIAL DATA ###########################################################
nrep = 10
pontos=[]
temposES=[]
georgeES=[]
temposESS=[]
georgeESS=[]
temposRQ=[]
georgeRQ=[]
sleeptime=60
lista=[10,20,50,100,200,500]
### Functions george
# Define the objective function (negative log-likelihood in this case).
def nll(p):
    """Objective for scipy.optimize: negative log-likelihood of the george
    GP at parameter vector `p`.

    NOTE(review): relies on the module-level globals `gp` (a george.GP
    already compute()d on the data) and `y` (targets) set by the main loop.
    """
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    ll = gp.lnlikelihood(y, quiet=True)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
    """Gradient of `nll` w.r.t. `p`; uses the same global `gp` and `y`."""
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    return -gp.grad_lnlikelihood(y, quiet=True)
### Functions gedi
def nll_gedi(p):
    """Objective for scipy.optimize: negative log-likelihood of the Gedi
    kernel at log-parameter vector `p`.

    Mutates the module-level `kernel` as a side effect and reads the data
    globals `x`, `y`, `yerr` set by the main loop.
    """
    global kernel
    # Update the kernel parameters and compute the likelihood.
    kernel = gedi.kernel_optimization.new_kernel(kernel, np.exp(p))
    ll = gedi.kernel_likelihood.likelihood(kernel, x, y, yerr)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll_gedi(p):
    """Gradient of `nll_gedi`; same globals and side effect on `kernel`."""
    global kernel
    # Update the kernel parameters and compute the likelihood.
    kernel = gedi.kernel_optimization.new_kernel(kernel, np.exp(p))
    return -np.array(gedi.kernel_likelihood.gradient_likelihood(kernel, x, y, yerr))
###############################################################################
### Things to run
for i0, i in enumerate(lista):
f=open("{0}.txt".format(i),"w")
sys.stdout = f
print i
pontos.append(i)
print 'pontos', pontos
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSquared(15.0, 1.1) +\
gedi.kernel.WhiteNoise(1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposES.append(sum(av) / float(nrep))
print 'temposES', temposES
###########################################################################
sleep(sleeptime)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15**2*george.kernels.ExpSquaredKernel(1.1**2)+\
george.kernels.WhiteKernel(1.0)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeES.append(sum(av) / float(nrep))
print 'georgeES', georgeES
###########################################################################
sleep(sleeptime)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSineSquared(15.0, 1.1,5.0) +\
gedi.kernel.WhiteNoise(1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposESS.append(sum(av) / float(nrep))
print 'temposESS', temposESS
###########################################################################
sleep(sleeptime)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15**2*george.kernels.ExpSine2Kernel(2./2.0**2,5.0)+\
george.kernels.WhiteKernel(1.0)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeESS.append(sum(av) / float(nrep))
print 'georgeESS', georgeESS
###########################################################################
sleep(sleeptime)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.RatQuadratic(5.0, 2.0, 100.0)+\
gedi.kernel.WhiteNoise(1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel1= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposRQ.append(sum(av) / float(nrep))
print 'temposRQ', temposRQ
###########################################################################
sleep(sleeptime)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15**2*george.kernels.RationalQuadraticKernel(100,2.0**2)+\
george.kernels.WhiteKernel(1.0)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeRQ.append(sum(av) / float(nrep))
print 'georgeRQ', georgeRQ
###########################################################################
sys.stdout = sys.__stdout__
f.close()
sleep(sleeptime)
##### Graphs
# Summary figure: three shared-y log-log panels, one per kernel family,
# comparing Gedi (solid line, circles) against george (dashed line).
N = pontos
f, (ax1, ax2, ax3) = pl.subplots(1, 3, sharey=True)
ax1.loglog(N, temposES, 'b-o')
ax1.loglog(N, georgeES, 'b--')
ax1.legend(['gedi ES+WN', 'george ES+WN'], loc='upper left')
ax1.set_ylabel('Time')
ax2.loglog(N, temposESS, 'b-o')
ax2.loglog(N, georgeESS, 'b--')
ax2.legend(['gedi ESS+WN', 'george ESS+WN'], loc='upper left')
ax2.set_xlabel('Number of points')
ax3.loglog(N, temposRQ, 'b-o')
ax3.loglog(N, georgeRQ, 'b--')
ax3.legend(['gedi RQ+WN', 'george RQ+WN'], loc='upper left')
f.savefig('speedtest_4.png')
# (Old single-panel version, kept for reference.)
#pl.figure()
#pl.loglog(N, temposES, 'b-o')
#pl.loglog(N, georgeES, 'b--')
#pl.xlim(0.9*N[0], 1.1*N[-1])
#pl.xlabel('Number of points')
#pl.ylabel('Time')
##pl.title('Optimization')
#pl.legend(['gedi ES+WN','george ES+WN'],loc='upper left')
#pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
#pl.savefig('speedtest_7.png')
| mit |
grlee77/scipy | scipy/spatial/tests/test__plotutils.py | 18 | 1943 | import pytest
from numpy.testing import assert_, assert_array_equal, suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
    """Smoke tests for the scipy.spatial plotting helpers."""

    # The unit square: a minimal point set giving a non-degenerate
    # triangulation, Voronoi diagram and convex hull in 2-D.
    points = [(0,0), (0,1), (1,0), (1,1)]

    def test_delaunay(self):
        # Smoke test
        figure = plt.figure()
        triangulation = Delaunay(self.points)
        simplices_snapshot = triangulation.simplices.copy()
        with suppress_warnings() as sup:
            # filter can be removed when matplotlib 1.x is dropped
            sup.filter(message="The ishold function was deprecated in version")
            returned = delaunay_plot_2d(triangulation, ax=figure.gca())
        # Plotting must neither mutate the triangulation...
        assert_array_equal(triangulation.simplices, simplices_snapshot)
        # ...nor return anything but the figure it drew on.
        assert_(returned is figure)
        delaunay_plot_2d(triangulation, ax=figure.gca())

    def test_voronoi(self):
        # Smoke test
        figure = plt.figure()
        diagram = Voronoi(self.points)
        with suppress_warnings() as sup:
            # filter can be removed when matplotlib 1.x is dropped
            sup.filter(message="The ishold function was deprecated in version")
            returned = voronoi_plot_2d(diagram, ax=figure.gca())
        assert_(returned is figure)
        # Also exercise the implicit-axes and no-vertices code paths.
        voronoi_plot_2d(diagram)
        voronoi_plot_2d(diagram, show_vertices=False)

    def test_convex_hull(self):
        # Smoke test
        figure = plt.figure()
        hull = ConvexHull(self.points)
        with suppress_warnings() as sup:
            # filter can be removed when matplotlib 1.x is dropped
            sup.filter(message="The ishold function was deprecated in version")
            returned = convex_hull_plot_2d(hull, ax=figure.gca())
        assert_(returned is figure)
        convex_hull_plot_2d(hull)
| bsd-3-clause |
rgerkin/upsit | bbdp/__init__.py | 1 | 6402 | import os
import sys
from datetime import datetime,timedelta
import numpy as np
import xlrd
filepath = os.path.abspath(__file__)
# Walk three directory levels up from this file (bbdp/__init__.py) so the
# project root, which contains the `upsit` package, becomes importable.
dirpath = filepath
for i in range(3):
    dirpath = os.path.dirname(dirpath)
sys.path.append(dirpath)
from upsit import Subject,Question,Response,QuestionSet,ResponseSet,Test,plt
def load(kind):
    """Load Banner Brain and Body Donation Project data.

    Parameters
    ----------
    kind : str
        Either 'dugger' (separate PD and control workbooks) or 'hentz'
        (one combined workbook); selects the spreadsheet layout to parse.

    Returns
    -------
    (subjects, tests)
        Dict of case id -> Subject, and a list of parsed Test objects.
    """
    module_path = os.path.dirname(os.path.realpath(__file__))
    data_path = os.path.join(module_path,'data')
    questions = []
    # Build the 40-question UPSIT answer key from the 'smellTestKey' sheet.
    test_path = os.path.join(data_path,'GerkinSmithUPSITautopsy9_10_14.xlsx')
    test_wb = xlrd.open_workbook(test_path)
    test_key = test_wb.sheet_by_name('smellTestKey')
    for q in range(1,41):
        row = test_key.row_values(q)
        options = row[1:5]  # 4 possible options
        answer_num = int(row[6]-1)  # Convert from 1-indexed to 0-indexed.
        questions.append(Question(options,answer_num))
    question_set = QuestionSet(questions)
    if kind == 'dugger':
        disease_path = os.path.join(data_path,
                                    'GerkinSmithUPSITautopsy9_10_14.xlsx')
        ctrl_path = os.path.join(data_path,
                                 'GerkinSmithQueryControls9_17_14.xlsx')
        cp_path = os.path.join(data_path,'Clinicopathological Correlations.xls')
        disease_wb = xlrd.open_workbook(disease_path)
        ctrl_wb = xlrd.open_workbook(ctrl_path)
        cp_wb = xlrd.open_workbook(cp_path)
        pd_sheet = disease_wb.sheet_by_name('"pure"PDonly1test')
        pd_subjects,pd_tests = parse_tests_dugger(kind,pd_sheet,question_set,
                                                  subject_label='pd')
        ctrl_sheet = ctrl_wb.sheet_by_name('AllNPcontrolVisits')
        ctrl_subjects,ctrl_tests = parse_tests_dugger(kind,ctrl_sheet,
                                                      question_set,subject_label='ctrl')
        # Currently not used.
        cp = cp_wb.sheets()[0]  # Only one sheet.
        subjects = ctrl_subjects.copy()
        subjects.update(pd_subjects)
        tests = pd_tests + ctrl_tests
    elif kind == 'hentz':
        all_path = os.path.join(data_path,'D20Mar2015a.xls')
        all_wb = xlrd.open_workbook(all_path)
        all_sheet = all_wb.sheet_by_name('Data')
        subjects,tests = parse_tests_hentz(kind,all_sheet,question_set)
    # NOTE(review): an unrecognized `kind` falls through both branches and
    # raises NameError on the return below.
    return subjects,tests
def parse_tests_hentz(kind,tests_sheet,question_set,subject_label=None):
    """Parse a worksheet of tests to return subject and tests.

    One spreadsheet row per administered test; rows sharing a
    'shri_case_num' reuse the same Subject.  Returns (subjects, tests).
    """
    subjects = {}
    tests = []
    headers = tests_sheet.row_values(0)
    hd = {key:i for i,key in enumerate(headers)}  # column name -> index
    for row_num in range(1,tests_sheet.nrows):
        row = tests_sheet.row_values(row_num)
        case_id = row[hd['shri_case_num']]
        if case_id not in subjects:
            subject = Subject(case_id)
            # Age is reported as an integer or as 100+.
            expired_age = row[hd['deathage']]
            subject.expired_age = 100 if expired_age=='100+' else int(expired_age)
            # NOTE(review): stores the raw 'female' column value with no
            # remapping, unlike the dugger parser -- confirm the encoding.
            subject.gender = row[hd['female']]
            if subject_label is not None:
                subject.label = subject_label
            # presumably columns 16-25 hold diagnosis codes where the value
            # 2 flags dementia -- TODO confirm against the workbook layout.
            subject.dementia = 2 in row[16:25]
            subject.stint = float(row[hd['stint']])
            subject.other = [int(_)>0 for _ in row[5:17]+row[19:25]]
            # NOTE(review): this overwrites any `subject_label` set above.
            subject.label = 'ctrl' if int(row[hd['controlp']]) else 'other'
            subjects[case_id] = subject
        responses = {}
        for q in range(1,41):
            choice_num = row[hd['smell_%d' % q]]
            if type(choice_num) is float:
                choice_num = int(choice_num)-1  # Convert 1-indexed to 0-indexed.
            else:
                choice_num = None  # Non-numeric cell: question unanswered.
            responses[q] = Response(question_set.questions[q],choice_num)
        response_set = ResponseSet(responses,indices=responses.keys())
        test = Test(subjects[case_id],response_set,None)  # No test date here.
        tests.append(test)
    return subjects,tests
def parse_tests_dugger(kind,tests_sheet,question_set,subject_label=None):
    """Parse a worksheet of tests to return subject and tests.

    Same row/Subject scheme as parse_tests_hentz, but using the dugger
    workbook's column names and recording the smell-test date.
    """
    subjects = {}
    tests = []
    headers = tests_sheet.row_values(0)
    hd = {key:i for i,key in enumerate(headers)}  # column name -> index
    for row_num in range(1,tests_sheet.nrows):
        row = tests_sheet.row_values(row_num)
        case_id = row[hd['CaseID']]
        if case_id not in subjects:
            subject = Subject(case_id)
            # Age is reported as an integer or as 100+.
            expired_age = row[hd['expired_age']]
            subject.expired_age = 100 if expired_age=='100+' else int(expired_age)
            # Gender is reported as 1 or 2; shifted here to 0/1.
            subject.gender = row[hd['tbl_donors.gender']]-1
            if subject_label is not None:
                subject.label = subject_label
            subject.demented = row[hd['dementia_nos']] in [1,'yes']
            subjects[case_id] = subject
        # Excel stores dates as serial day counts; the -2 offset adjusts
        # for Excel's 1900 epoch conventions -- TODO confirm no off-by-one.
        test_date = row[hd['smell_test_date']]
        test_date = datetime(1900,1,1) + timedelta(int(test_date)-2)
        responses = {}
        for q in range(1,41):
            choice_num = row[hd['smell_%d' % q]]
            if type(choice_num) is float:
                choice_num = int(choice_num)-1  # Convert 1-indexed to 0-indexed.
            else:
                choice_num = None  # Non-numeric cell: question unanswered.
            responses[q] = Response(question_set.questions[q],choice_num)
        response_set = ResponseSet(responses,indices=responses.keys())
        test = Test(subjects[case_id],response_set,test_date)
        tests.append(test)
    return subjects,tests
def correct_matrix(tests, kind=None):
    """Return an (n_subjects, 40) 0/1 array of per-question correctness.

    Parameters
    ----------
    tests : iterable of Test
    kind : str, optional
        When given, only tests whose subject has this label are included.
    """
    correct = {}
    for test in tests:
        if (kind is None) or (test.subject.label == kind):
            correct[test.subject.case_id] = [
                int(test.response_set.responses[i].correct)
                for i in range(1, 41)]
            # (A leftover debug print of question 35 was removed here.)
    # list() is required under Python 3, where dict.values() is a view and
    # np.array() would otherwise yield a useless 0-d object array.
    return np.array(list(correct.values()))
def correct_corrs(tests, kind=None):
    """Plot and return the 40x40 between-question correlation matrix of
    per-subject correctness.

    BUG FIX: the original re-looped over `tests` assigning into an
    undefined name `correct` (raising NameError) and never used the
    result; that dead loop is removed -- correct_matrix() already builds
    the per-subject correctness rows.
    """
    matrix = correct_matrix(tests, kind=kind)
    # Transpose so rows are questions; corrcoef then correlates questions.
    corrs = np.corrcoef(matrix.transpose())
    plt.figure()
    plt.pcolor(np.arange(0.5,41.5,1),np.arange(0.5,41.5,1),corrs,cmap='RdBu_r',vmin=-1,vmax=1)
    plt.colorbar()
    plt.xlim(0.5,40.5)
    plt.ylim(0.5,40.5)
    return corrs
def factor_analysis(tests):
    """Cross-validated factor-analysis score versus number of components,
    computed on the control subjects' correctness matrix.

    Returns (n_components, fa_scores) and plots the curve.
    """
    from sklearn.decomposition import FactorAnalysis
    # sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
    # cross_val_score now lives in sklearn.model_selection.
    from sklearn.model_selection import cross_val_score
    matrix = correct_matrix(tests, kind='ctrl')
    print(matrix.shape)
    # matrix must have a number of rows divisible by 3.
    # if it does not, eliminate some rows, or pass cv=a to cross_val_score,
    # where 'a' is a number by which the number of rows is divisible.
    fa = FactorAnalysis()
    fa_scores = []
    n_components = np.arange(1, 41)
    for n in n_components:
        fa.n_components = n
        fa_scores.append(np.mean(cross_val_score(fa, matrix)))
    plt.plot(n_components, fa_scores)
    return n_components, fa_scores
| gpl-2.0 |
jjx02230808/project0223 | sklearn/_build_utils/__init__.py | 21 | 1125 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
# State file used by the cythonize machinery and the package to process.
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'

# WindowsError is not defined on unix systems; alias it to None there so
# `except WindowsError` style code can reference the name everywhere.
try:
    WindowsError
except NameError:
    WindowsError = None
from numpy.distutils.system_info import get_info
def get_blas_info():
    """Locate an optimized BLAS for building the Cython extensions.

    Returns
    -------
    (cblas_libs, blas_info) : (list of str, dict)
        Libraries to link against and the distutils info dict; falls back
        to the bundled 'cblas' sources when no usable ATLAS is found.
    """

    def atlas_not_found(blas_info_):
        # BUG FIX: inspect the argument itself.  The original read the
        # enclosing scope's `blas_info` and silently ignored its parameter.
        def_macros = blas_info_.get('define_macros', [])
        for x in def_macros:
            if x[0] == "NO_ATLAS_INFO":
                # if x[1] != 1 we should have lapack
                # how do we do that now?
                return True
            if x[0] == "ATLAS_INFO":
                if "None" in x[1]:
                    # this one turned up on FreeBSD
                    return True
        return False

    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or atlas_not_found(blas_info):
        cblas_libs = ['cblas']
        blas_info.pop('libraries', None)
    else:
        cblas_libs = blas_info.pop('libraries', [])
    return cblas_libs, blas_info
| bsd-3-clause |
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
# Parse the first two components of the numpy version, e.g. "1.26.4" -> (1, 26).
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# True if np.clip supports the ``out`` kwarg (added in numpy 1.2).
# Compare as a version tuple: the old element-wise test
# ``NP_MAJOR >= 1 and NP_MINOR >= 2`` wrongly reported False for
# versions such as 2.0 (major bumped, minor reset below 2).
NP_CLIP_OUT = (NP_MAJOR, NP_MINOR) >= (1, 2)
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# Add British spellings: every entry containing 'gray' gets a 'grey' twin.
# Iterate over a snapshot of the items: inserting into the dict while
# iterating the live .items() view raises RuntimeError under Python 3
# (under Python 2, items() already returned a list, so this is unchanged).
for k, v in list(cnames.items()):
    if k.find('gray')>=0:
        k = k.replace('gray', 'grey')
        cnames[k] = v
def is_color_like(c):
    """Return *True* if *c* can be converted to *RGB*."""
    try:
        colorConverter.to_rgb(c)
    except ValueError:
        return False
    return True
def rgb2hex(rgb):
    """Given a len 3 rgb tuple of 0-1 floats, return the '#rrggbb' hex string."""
    # Scale each channel to 0-255 and render as two lowercase hex digits.
    r, g, b = [round(channel * 255) for channel in rgb]
    return '#%02x%02x%02x' % (r, g, b)
# Matches a complete HTML hex color string like '#1a2B3c' (case-insensitive).
# NOTE(review): prefer a raw string r"\A#[a-fA-F0-9]{6}\Z"; '\A' only survives
# here because it is not a recognized string escape sequence.
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
    """
    Take a hex string *s* and return the corresponding rgb 3-tuple of
    floats in the 0-1 range.
    Example: #efefef -> (0.93725, 0.93725, 0.93725)
    Raises TypeError for non-string input and ValueError for a string
    that is not a valid '#rrggbb' color.
    """
    # ``basestring`` covers both str and unicode (Python 2 only).
    if not isinstance(s, basestring):
        raise TypeError('hex2color requires a string argument')
    if hexColorPattern.match(s) is None:
        raise ValueError('invalid hex color string "%s"' % s)
    # Split into the rr, gg, bb pairs and scale each 0-255 value to 0-1.
    return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
    """
    Provides methods for converting color specifications to *RGB* or *RGBA*
    Caching is used for more efficient conversion upon repeated calls
    with the same argument.
    Ordinarily only the single instance instantiated in this module,
    *colorConverter*, is needed.
    """
    # Builtin single-letter color abbreviations -> RGB triples.
    colors = {
        'b' : (0.0, 0.0, 1.0),
        'g' : (0.0, 0.5, 0.0),
        'r' : (1.0, 0.0, 0.0),
        'c' : (0.0, 0.75, 0.75),
        'm' : (0.75, 0, 0.75),
        'y' : (0.75, 0.75, 0),
        'k' : (0.0, 0.0, 0.0),
        'w' : (1.0, 1.0, 1.0),
        }
    # Class-level memo mapping color spec -> RGB tuple, shared by all
    # instances (in practice only the module singleton below exists).
    cache = {}
    def to_rgb(self, arg):
        """
        Returns an *RGB* tuple of three floats from 0-1.
        *arg* can be an *RGB* or *RGBA* sequence or a string in any of
        several forms:
            1) a letter from the set 'rgbcmykw'
            2) a hex color string, like '#00FFFF'
            3) a standard name, like 'aqua'
            4) a float, like '0.4', indicating gray on a 0-1 scale
        if *arg* is *RGBA*, the *A* will simply be discarded.
        """
        # Fast path: return a cached conversion if *arg* is hashable.
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError: # could be unhashable rgb seq
            arg = tuple(arg)
        # Retry the cache with the (now hashable) tuple form.
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError:
            raise ValueError(
                'to_rgb: arg "%s" is unhashable even inside a tuple'
                % (str(arg),))
        try:
            if cbook.is_string_like(arg):
                # Single-letter abbreviation, named color, hex string,
                # or a gray level encoded as a float string.
                color = self.colors.get(arg, None)
                if color is None:
                    str1 = cnames.get(arg, arg)
                    if str1.startswith('#'):
                        color = hex2color(str1)
                    else:
                        fl = float(arg)
                        if fl < 0 or fl > 1:
                            raise ValueError(
                                'gray (string) must be in range 0-1')
                        color = tuple([fl]*3)
            elif cbook.iterable(arg):
                # An RGB or RGBA sequence; the alpha channel is dropped.
                if len(arg) > 4 or len(arg) < 3:
                    raise ValueError(
                        'sequence length is %d; must be 3 or 4'%len(arg))
                color = tuple(arg[:3])
                if [x for x in color if (float(x) < 0) or (x > 1)]:
                    # This will raise TypeError if x is not a number.
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                raise ValueError('cannot convert argument to rgb sequence')
            self.cache[arg] = color
        except (KeyError, ValueError, TypeError), exc:
            raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
        # Error messages could be improved by handling TypeError
        # separately; but this should be rare and not too hard
        # for the user to figure out as-is.
        return color
    def to_rgba(self, arg, alpha=None):
        """
        Returns an *RGBA* tuple of four floats from 0-1.
        For acceptable values of *arg*, see :meth:`to_rgb`.
        If *arg* is an *RGBA* sequence and *alpha* is not *None*,
        *alpha* will replace the original *A*.
        """
        try:
            if not cbook.is_string_like(arg) and cbook.iterable(arg):
                if len(arg) == 4:
                    if [x for x in arg if (float(x) < 0) or (x > 1)]:
                        # This will raise TypeError if x is not a number.
                        raise ValueError('number in rbga sequence outside 0-1 range')
                    if alpha is None:
                        return tuple(arg)
                    if alpha < 0.0 or alpha > 1.0:
                        raise ValueError("alpha must be in range 0-1")
                    # Scale (not replace) the existing alpha by the new one.
                    return arg[0], arg[1], arg[2], arg[3] * alpha
                r,g,b = arg[:3]
                if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                r,g,b = self.to_rgb(arg)
            if alpha is None:
                alpha = 1.0
            return r,g,b,alpha
        except (TypeError, ValueError), exc:
            raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
    def to_rgba_array(self, c, alpha=None):
        """
        Returns a numpy array of *RGBA* tuples.
        Accepts a single mpl color spec or a sequence of specs.
        Special case to handle "no color": if *c* is "none" (case-insensitive),
        then an empty array will be returned.  Same for an empty list.
        """
        # The string "none" (any case) or an empty sequence yields an
        # empty (0, 4) array.
        try:
            if c.lower() == 'none':
                return np.zeros((0,4), dtype=np.float_)
        except AttributeError:
            pass
        if len(c) == 0:
            return np.zeros((0,4), dtype=np.float_)
        # First try treating *c* as a single color spec ...
        try:
            result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
        except ValueError:
            # ... and fall back to converting it element by element.
            if isinstance(c, np.ndarray):
                # NOTE(review): ``and`` looks suspicious here -- as written,
                # only arrays that are both non-2d and non-string raise; the
                # intent is probably ``or``.  Confirm before changing.
                if c.ndim != 2 and c.dtype.kind not in 'SU':
                    raise ValueError("Color array must be two-dimensional")
            result = np.zeros((len(c), 4))
            for i, cc in enumerate(c):
                result[i] = self.to_rgba(cc, alpha) # change in place
        return np.asarray(result, np.float_)
# Module-level singleton used throughout matplotlib.
colorConverter = ColorConverter()
def makeMappingArray(N, data):
    """Create an *N* -element 1-d lookup table
    *data* represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.
    The function returns an array "result" where ``result[x*(N-1)]``
    gives the closest value for values of x between 0 and 1.
    """
    try:
        adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertable to an array")
    shape = adata.shape
    # Validate an nx3 table.  The original test used ``and`` here, so 1-d
    # input slipped past the first clause and raised IndexError on
    # ``shape[1]`` instead of the intended ValueError.
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")
    x = adata[:,0]
    y0 = adata[:,1]
    y1 = adata[:,2]
    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
            "data mapping points must start with x=0. and end with x=1")
    # np.any replaces the long-removed np.sometrue alias.
    if np.any(np.sort(x)-x):
        raise ValueError(
            "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N-1)
    lut = np.zeros((N,), float)
    xind = np.arange(float(N))
    ind = np.searchsorted(x, xind)[1:-1]
    # Interpolate between the y1 of the lower point and the y0 of the upper
    # point, realizing the documented discontinuity handling.
    lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
                  * (y0[ind] - y1[ind-1]) + y1[ind-1])
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # Confine the lut to values between 0 and 1.  np.clip returns a new
    # array unless ``out`` is given; the original call discarded the
    # result, so the table was never actually clipped.
    np.clip(lut, 0.0, 1.0, out=lut)
    return lut
class Colormap:
    """Base class for all scalar to rgb mappings
    Important methods:
        * :meth:`set_bad`
        * :meth:`set_under`
        * :meth:`set_over`
    """
    def __init__(self, name, N=256):
        """
        Public class attributes:
            :attr:`N` : number of rgb quantization levels
            :attr:`name` : name of colormap
        """
        self.name = name
        self.N = N
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # The lookup table holds N colors plus three extra slots appended
        # for under-range, over-range and bad (masked) values.
        self._i_under = N
        self._i_over = N+1
        self._i_bad = N+2
        self._isinit = False
    def __call__(self, X, alpha=1.0, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,).  If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """
        if not self._isinit: self._init()
        alpha = min(alpha, 1.0) # alpha must be between 0 and 1
        alpha = max(alpha, 0.0)
        self._lut[:-3, -1] = alpha
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            xma = ma.asarray(X)
            xa = xma.filled(0)
            mask_bad = ma.getmask(xma)
        if xa.dtype.char in np.typecodes['Float']:
            np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.
            if NP_CLIP_OUT:
                np.clip(xa * self.N, -1, self.N, out=xa)
            else:
                xa = np.clip(xa * self.N, -1, self.N)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        np.putmask(xa, xa>self.N-1, self._i_over)
        np.putmask(xa, xa<0, self._i_under)
        if mask_bad is not None and mask_bad.shape == xa.shape:
            np.putmask(xa, mask_bad, self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut
        rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        # twice as fast as lut[xa];
        # using the clip or wrap mode and providing an
        # output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0,:])
        return rgba
    def set_bad(self, color = 'k', alpha = 1.0):
        '''Set color to be used for masked values.
        '''
        self._rgba_bad = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def set_under(self, color = 'k', alpha = 1.0):
        '''Set color to be used for low out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_under = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def set_over(self, color = 'k', alpha = 1.0):
        '''Set color to be used for high out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_over = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()
    def _set_extremes(self):
        '''Copy the under/over/bad colors into their special _lut slots.'''
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N-1]
        self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
        '''Generate the lookup table, self._lut'''
        # Bug fix: this was declared ``def _init():`` without *self*, so the
        # ``self._init()`` calls above raised TypeError ("takes no
        # arguments") instead of the intended NotImplementedError.
        raise NotImplementedError("Abstract class only")
    def is_gray(self):
        '''Return True if the red, green and blue channels of the lut are
        all identical (i.e. the map only produces grays).'''
        if not self._isinit: self._init()
        return (np.alltrue(self._lut[:,0] == self._lut[:,1])
                and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
    """Colormap objects based on lookup tables using linear segments.
    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """
    def __init__(self, name, segmentdata, N=256):
        """Create color map from linear mapping segments
        segmentdata argument is a dictionary with a red, green and blue
        entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
        forming rows in a table.
        Example: suppose you want red to increase from 0 to 1 over
        the bottom half, green to do the same over the middle half,
        and blue over the top half.  Then you would use::
            cdict = {'red': [(0.0, 0.0, 0.0),
                             (0.5, 1.0, 1.0),
                             (1.0, 1.0, 1.0)],
                     'green': [(0.0, 0.0, 0.0),
                               (0.25, 0.0, 0.0),
                               (0.75, 1.0, 1.0),
                               (1.0, 1.0, 1.0)],
                     'blue': [(0.0, 0.0, 0.0),
                              (0.5, 0.0, 0.0),
                              (1.0, 1.0, 1.0)]}
        Each row in the table for a given color is a sequence of
        *x*, *y0*, *y1* tuples.  In each sequence, *x* must increase
        monotonically from 0 to 1.  For any input value *z* falling
        between *x[i]* and *x[i+1]*, the output value of a given color
        will be linearly interpolated between *y1[i]* and *y0[i+1]*::
            row i:   x  y0  y1
                           /
                          /
            row i+1: x  y0  y1
        Hence y0 in the first row and y1 in the last row are never used.
        .. seealso::
            :func:`makeMappingArray`
        """
        self.monochrome = False  # True only if all colors in map are identical;
                                 # needed for contouring.
        Colormap.__init__(self, name, N)
        self._segmentdata = segmentdata
    def _init(self):
        # Build the (N + 3, 4) lookup table: N interpolated colors plus the
        # under/over/bad slots; the alpha column stays at 1.
        # NOTE(review): np.float is removed in numpy >= 1.24; plain ``float``
        # is the equivalent spelling there.
        self._lut = np.ones((self.N + 3, 4), np.float)
        self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
        self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
        self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
        self._isinit = True
        self._set_extremes()
class ListedColormap(Colormap):
    """Colormap object generated from a list of colors.
    This may be most useful when indexing directly into a colormap,
    but it can also be used to generate special colormaps for ordinary
    mapping.
    """
    def __init__(self, colors, name = 'from_list', N = None):
        """
        Make a colormap from a list of colors.
        *colors*
            a list of matplotlib color specifications,
            or an equivalent Nx3 floating point array (*N* rgb values)
        *name*
            a string to identify the colormap
        *N*
            the number of entries in the map.  The default is *None*,
            in which case there is one colormap entry for each
            element in the list of colors.  If::
                N < len(colors)
            the list will be truncated at *N*.  If::
                N > len(colors)
            the list will be extended by repetition.
        """
        self.colors = colors
        self.monochrome = False  # True only if all colors in map are identical;
                                 # needed for contouring.
        if N is None:
            N = len(self.colors)
        else:
            # An explicit N: adapt the color list to exactly N entries.
            if cbook.is_string_like(self.colors):
                # A single color spec repeated N times.
                self.colors = [self.colors] * N
                self.monochrome = True
            elif cbook.iterable(self.colors):
                self.colors = list(self.colors) # in case it was a tuple
                if len(self.colors) == 1:
                    self.monochrome = True
                if len(self.colors) < N:
                    # Extend by repetition, then truncate to exactly N.
                    self.colors = list(self.colors) * N
                del(self.colors[N:])
            else:
                # A bare number is interpreted as a gray level.
                try: gray = float(self.colors)
                except TypeError: pass
                else:  self.colors = [gray] * N
                self.monochrome = True
        Colormap.__init__(self, name, N)
    def _init(self):
        # Convert every spec to RGB and fill the (N + 3, 4) lookup table;
        # alpha is fixed at 1 for the listed colors.
        rgb = np.array([colorConverter.to_rgb(c)
                        for c in self.colors], np.float)
        self._lut = np.zeros((self.N + 3, 4), np.float)
        self._lut[:-3, :-1] = rgb
        self._lut[:-3, -1] = 1
        self._isinit = True
        self._set_extremes()
class Normalize:
    """
    Normalize a given value to the 0-1 range
    """
    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        If *vmin* or *vmax* is not given, they are taken from the input's
        minimum and maximum value respectively.  If *clip* is *True* and
        the given value falls outside the range, the returned value
        will be 0 or 1, whichever is closer.  Returns 0 if::
            vmin==vmax
        Works with scalars or arrays, including masked arrays.  If
        *clip* is *True*, masked values are set to 1; otherwise they
        remain masked.  Clipping silently defeats the purpose of setting
        the over, under, and masked colors in the colormap, so it is
        likely to lead to surprises; therefore the default is
        *clip* = *False*.
        """
        self.vmin = vmin
        self.vmax = vmax
        self.clip = clip
    def __call__(self, value, clip=None):
        """Map *value* (scalar or array-like) linearly into 0-1."""
        if clip is None:
            clip = self.clip
        # Normalize scalars through the same masked-array path and unwrap
        # at the end.
        # NOTE(review): np.float is removed in numpy >= 1.24; plain
        # ``float`` is equivalent here.
        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)
        # Fill in any unset vmin/vmax from the data itself.
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                # Clip the filled data but keep the original mask.
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            result = (val-vmin) * (1.0/(vmax-vmin))
            if vtype == 'scalar':
                result = result[0]
            return result
    def inverse(self, value):
        """Map a 0-1 value back to the original data range."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)
    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*.
        '''
        self.vmin = ma.minimum(A)
        self.vmax = ma.maximum(A)
    def autoscale_None(self, A):
        ' autoscale only None-valued vmin or vmax'
        if self.vmin is None: self.vmin = ma.minimum(A)
        if self.vmax is None: self.vmax = ma.maximum(A)
    def scaled(self):
        'return true if vmin and vmax set'
        return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
    """
    Normalize a given value to the 0-1 range on a log scale
    """
    def __call__(self, value, clip=None):
        """Map *value* (scalar or array-like) into 0-1 logarithmically."""
        if clip is None:
            clip = self.clip
        # Same scalar/array handling as Normalize.__call__.
        # NOTE(review): np.float is removed in numpy >= 1.24.
        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin<=0:
            # Log scaling is undefined for non-positive bounds.
            raise ValueError("values must all be positive")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            # Interpolate in log space: log(val/vmin) / log(vmax/vmin).
            result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
            if vtype == 'scalar':
                result = result[0]
            return result
    def inverse(self, value):
        """Map a 0-1 value back to the original (log-scaled) data range."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin * ma.power((vmax/vmin), val)
        else:
            return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
    '''
    Generate a colormap index based on discrete intervals.
    Unlike :class:`Normalize` or :class:`LogNorm`,
    :class:`BoundaryNorm` maps values to integers instead of to the
    interval 0-1.
    Mapping to the 0-1 interval could have been done via
    piece-wise linear interpolation, but using integers seems
    simpler, and reduces the number of conversions back and forth
    between integer and floating point.
    '''
    def __init__(self, boundaries, ncolors, clip=False):
        '''
        *boundaries*
            a monotonically increasing sequence
        *ncolors*
            number of colors in the colormap to be used
        If::
            b[i] <= v < b[i+1]
        then v is mapped to color j;
        as i varies from 0 to len(boundaries)-2,
        j goes from 0 to ncolors-1.
        Out-of-range values are mapped to -1 if low and ncolors
        if high; these are converted to valid indices by
        :meth:`Colormap.__call__` .
        '''
        self.clip = clip
        self.vmin = boundaries[0]
        self.vmax = boundaries[-1]
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)
        self.Ncmap = ncolors
        # Interpolate interval indices onto the colormap only when the
        # number of intervals differs from the number of colors.
        if self.N-1 == self.Ncmap:
            self._interp = False
        else:
            self._interp = True
    def __call__(self, x, clip=None):
        """Map *x* (scalar or array-like) to integer colormap indices."""
        if clip is None:
            clip = self.clip
        x = ma.asarray(x)
        mask = ma.getmaskarray(x)
        # Fill masked entries with an out-of-range sentinel; the mask is
        # re-applied to the result below.
        xx = x.filled(self.vmax+1)
        if clip:
            # Bug fix: np.clip returns a new array; the original call
            # discarded the result, so clipping silently never happened.
            xx = np.clip(xx, self.vmin, self.vmax)
        iret = np.zeros(x.shape, dtype=np.int16)
        # For each value, the index of the highest boundary it reaches.
        for i, b in enumerate(self.boundaries):
            iret[xx>=b] = i
        if self._interp:
            iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
        # Out-of-range markers, resolved by Colormap.__call__.
        iret[xx<self.vmin] = -1
        iret[xx>=self.vmax] = self.Ncmap
        ret = ma.array(iret, mask=mask)
        if ret.shape == () and not mask:
            ret = int(ret) # assume python scalar
        return ret
    def inverse(self, value):
        """BoundaryNorm is many-to-one and therefore not invertible."""
        # Bug fix: the original *returned* the ValueError instance instead
        # of raising it, so callers never saw an error.
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    '''
    Dummy replacement for Normalize, for the case where we
    want to use indices directly in a
    :class:`~matplotlib.cm.ScalarMappable` .
    '''
    def __call__(self, value, clip=None):
        # Identity mapping: the data values already are colormap indices.
        return value
    def inverse(self, value):
        return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
PrashntS/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    # For label-indicator matrices the "labels" are simply the column
    # indices 0..n_classes-1; check_array validates (and converts sparse)
    # input along the way.
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table: target type (as returned by type_of_target) -> helper
# that extracts the unique labels for that format.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels
    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels
    At the moment, we also don't allow "multiclass-multioutput" input type.
    Parameters
    ----------
    *ys : array-likes,
    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.
    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a special case of "multiclass", so mixing the two is fine.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """Check if ``y`` is in a multilabel (binary indicator) format.
    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.
    Returns
    -------
    out : bool,
        ``True`` if ``y`` is a label indicator matrix, else ``False``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # An indicator matrix must be 2-d with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # All-zero matrices qualify; otherwise the stored entries must be a
        # single integral value (bool/int/uint dtype, or a float that holds
        # only integral values).
        if len(y.data) == 0:
            return True
        stored = np.unique(y.data)
        if stored.size != 1:
            return False
        return bool(y.dtype.kind in 'biu' or
                    (stored.dtype.kind == 'f' and
                     np.all(stored.astype(int) == stored)))
    # Dense case: at most two distinct values, all integral.
    labels = np.unique(y)
    if len(labels) >= 3:
        return False
    return bool(y.dtype.kind in 'biu' or
                (labels.dtype.kind == 'f' and
                 np.all(labels.astype(int) == labels)))
def type_of_target(y):
    """Determine the type of data indicated by target `y`
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything array-convertible,
    # but reject bare strings.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    # More than one column means a multioutput target.
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic
    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.
    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.
    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    Parameters
    ----------
    clf : estimator implementing ``partial_fit``
    classes : array-like of possible class labels, or None
    Returns
    -------
    bool : True on the first call (``clf.classes_`` is then set), else False.
    """
    if getattr(clf, 'classes_', None) is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    elif classes is not None:
        if getattr(clf, 'classes_', None) is not None:
            # Not the first call: the provided classes must match exactly.
            if not array_equal(clf.classes_, unique_labels(classes)):
                raise ValueError(
                    "`classes=%r` is not the same as on last call "
                    "to partial_fit, was: %r" % (classes, clf.classes_))
        else:
            # This is the first call to partial_fit
            clf.classes_ = unique_labels(classes)
            return True
    # classes is None and clf.classes_ has already previously been set:
    # nothing to do
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC layout gives cheap per-column (per-output) access.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            # Normalize weighted counts into a probability distribution.
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
    """Smoke-test graph_lasso / GraphLasso on data drawn from a sparse
    multivariate normal: both optimization modes must agree, costs must
    decrease, and centering options must be equivalent on centered data."""
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)

    for alpha in (0., .1, .25):
        covs = dict()
        icovs = dict()
        # Run both solvers on the same empirical covariance.
        for method in ('cd', 'lars'):
            cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                             return_costs=True)
            covs[method] = cov_
            icovs[method] = icov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
        assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)

    # Smoke test the estimator
    model = GraphLasso(alpha=.25).fit(X)
    model.score(X)
    # The estimator must reproduce the function results (last alpha=.25).
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)

    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
    """Check graph_lasso on iris against the reference R ``glasso`` output."""
    # Hard-coded solution from R glasso package for alpha=1.0
    # The iris datasets in R and sklearn do not match in a few places, these
    # values are for the sklearn version
    expected_cov = np.array([
        [0.68112222, 0.0, 0.2651911, 0.02467558],
        [0.00, 0.1867507, 0.0, 0.00],
        [0.26519111, 0.0, 3.0924249, 0.28774489],
        [0.02467558, 0.0, 0.2877449, 0.57853156]])
    expected_icov = np.array([
        [1.5188780, 0.0, -0.1302515, 0.0],
        [0.0, 5.354733, 0.0, 0.0],
        [-0.1302515, 0.0, 0.3502322, -0.1686399],
        [0.0, 0.0, -0.1686399, 1.8123908]])

    emp_cov = empirical_covariance(datasets.load_iris().data)
    # Both optimization modes must reproduce the reference solution.
    for mode in ('cd', 'lars'):
        cov_est, icov_est = graph_lasso(emp_cov, alpha=1.0,
                                        return_costs=False, mode=mode)
        assert_array_almost_equal(cov_est, expected_cov)
        assert_array_almost_equal(icov_est, expected_icov)
def test_graph_lasso_iris_singular():
    """Check graph_lasso on a rank-deficient empirical covariance
    (3 samples, 4 features) against the R ``glasso`` reference."""
    # Small subset of rows to test the rank-deficient case
    # Need to choose samples such that none of the variances are zero
    indices = np.arange(10, 13)

    # Hard-coded solution from R glasso package for alpha=0.01
    cov_R = np.array([
        [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
        [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
        [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
        [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
    ])
    icov_R = np.array([
        [24.42244057, -16.831679593, 0.0, 0.0],
        [-16.83168201, 24.351841681, -6.206896552, -12.5],
        [0.0, -6.206896171, 153.103448276, 0.0],
        [0.0, -12.499999143, 0.0, 462.5]
    ])
    X = datasets.load_iris().data[indices, :]
    emp_cov = empirical_covariance(X)
    # Both solvers must handle the singular input and match R.
    for method in ('cd', 'lars'):
        cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
                                mode=method)
        assert_array_almost_equal(cov, cov_R, decimal=5)
        assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
    """Smoke-test GraphLassoCV, including its (captured) verbose mode."""
    # Sample data from a sparse multivariate normal
    n_features = 5
    n_samples = 6
    rng = check_random_state(random_state)
    precision = make_sparse_spd_matrix(n_features, alpha=.96,
                                       random_state=rng)
    X = rng.multivariate_normal(np.zeros(n_features), linalg.inv(precision),
                                size=n_samples)

    # Capture stdout, to smoke test the verbose mode
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = saved_stdout

    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
DSLituiev/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 46 | 2798 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
    }
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)

    # TASK: print the cross-validated scores for each parameter set explored
    # by the grid search.
    # BUG FIX: `grid_scores_` belonged to the deprecated
    # sklearn.grid_search.GridSearchCV; the sklearn.model_selection version
    # imported here exposes the results as the `cv_results_` dict instead.
    cv_results = grid_search.cv_results_
    for mean_score, params in zip(cv_results['mean_test_score'],
                                  cv_results['params']):
        print("%0.3f for %r" % (mean_score, params))

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    y_predicted = grid_search.predict(docs_test)

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
466152112/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
# Extent of the data / plotting area; mouse clicks create points inside
# this square, and the decision surface is evaluated over it.
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """Observable data holder for the GUI.

    Stores the clicked training points, the fitted classifier and the last
    computed decision surface.  Implements the observable side of the
    observer pattern: registered observers are notified on change events.
    """

    def __init__(self):
        self.observers = []
        self.surface = None
        self.data = []
        self.cls = None
        self.surface_type = 0

    def changed(self, event):
        """Broadcast *event* to every registered observer."""
        for listener in self.observers:
            listener.update(event, self)

    def add_observer(self, observer):
        """Register an observer to be notified on change events."""
        self.observers.append(observer)

    def set_surface(self, surface):
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected (x, y, label) points to *file* in
        svmlight format: columns 0-1 are features, column 2 the label."""
        points = np.array(self.data)
        dump_svmlight_file(points[:, 0:2], points[:, 2], file)
class Controller(object):
    """Mediates between the Tk widgets and the :class:`Model`.

    Reads the SVM hyper-parameters from the GUI controls, fits a libsvm
    model on the clicked points and pushes the resulting decision surface
    into the model (which then notifies the View).
    """

    def __init__(self, model):
        self.model = model
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False

    def fit(self):
        """Fit an SVM on the current data and publish the decision surface."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]

        # Hyper-parameters come from Tk string variables attached to this
        # controller by ControllBar (complexity, gamma, coef0, degree).
        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        # Radio-button integer value -> libsvm kernel name.
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            # All examples share one label: fall back to a one-class SVM.
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        # Notify observers (the View) that a new surface is available.
        self.model.changed("surface")

    def decision_surface(self, cls):
        """Evaluate the decision function of *cls* on a regular grid.

        Returns the meshgrid coordinates (X1, X2) and the decision values
        Z reshaped to the grid shape.
        """
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z

    def clear_data(self):
        """Drop all data points and notify observers."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")

    def add_example(self, x, y, label):
        """Append one labelled point (x, y) and refresh the display."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")

        # update decision surface if already fitted.
        self.refit()

    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Matplotlib/Tk view of the data and decision surface.

    Implements the observer side of the observer pattern: the Model calls
    :meth:`update` with an event name whenever its state changes.
    """

    def __init__(self, root, controller):
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        # Left / right mouse clicks create positive / negative examples.
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        self.contours = []
        self.c_labels = None
        self.plot_kernels()

    def plot_kernels(self):
        # Display the kernel formulas below the plotting area.
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")

    def onclick(self, event):
        # Clicks outside the axes have xdata/ydata set to None: ignore them.
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)

    def update_example(self, model, idx):
        # Draw the point at position `idx` (white: label +1, black: -1).
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        # NOTE(review): a label outside {1, -1} would leave `color` unbound
        # (NameError on the next line) -- labels are only ever +/-1 here.
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)

    def update(self, event, model):
        """Observer callback: redraw according to the model change event."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)

        if event == "example_added":
            self.update_example(model, -1)

        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()

        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)

        self.canvas.draw()

    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []

    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)

    def plot_decision_surface(self, surface, type):
        # type 0: dashed/solid margin contour lines; type 1: filled surface.
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Bottom control bar: kernel choice, hyper-parameters and actions.

    Builds the Tk widgets and binds them to the controller's Tk variables
    (``kernel``, ``surface_type``) and attaches the string-valued entry
    fields (``complexity``, ``gamma``, ``degree``, ``coef0``) that
    ``Controller.fit`` parses.
    """

    def __init__(self, root, controller):
        fm = Tk.Frame(root)
        # Kernel radio buttons; their integer values index kernel_map in
        # Controller.fit (0: linear, 1: rbf, 2: poly).
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)

        # Hyper-parameter entry fields, stored on the controller.
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        c = Tk.Frame(valbox)
        Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        c.pack()

        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        g = Tk.Frame(valbox)
        Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        g.pack()

        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        d = Tk.Frame(valbox)
        Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        d.pack()

        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        r = Tk.Frame(valbox)
        Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        r.pack()
        valbox.pack(side=Tk.LEFT)

        # Surface display mode (contour lines vs filled surface).
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)

        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command-line option parser for the GUI script."""
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--output",
                      action="store", type="str", dest="output",
                      help="Path where to dump data.")
    return parser
def main(argv):
    """Launch the GUI; optionally dump the clicked data on exit."""
    op = get_parser()
    opts, args = op.parse_args(argv[1:])
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    view = View(root, controller)
    model.add_observer(view)
    # Blocks until the window is closed.
    Tk.mainloop()

    # After the GUI exits, optionally write the collected points in
    # svmlight format.
    if opts.output:
        model.dump_svmlight_file(opts.output)

if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/__check_build/__init__.py | 30 | 1669 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""


def raise_build_error(e):
    """Raise an informative ImportError when the compiled extensions of
    scikit-learn cannot be imported.

    Lists the contents of the package directory to help debugging on the
    mailing list, and picks a hint message depending on whether the import
    looks like an in-place source-tree import.
    """
    # Raise a comprehensible error and list the contents of the
    # directory to help debugging on the mailing list.
    local_dir = os.path.split(__file__)[0]
    msg = STANDARD_MSG
    # BUG FIX: the package directory is 'sklearn/__check_build' (see the
    # relative import of ._check_build below); the previous comparison
    # against 'sklearn/check_build' could never match, so the in-place
    # build hint was never shown.
    if local_dir == "sklearn/__check_build":
        # Picking up the local install: this will work only if the
        # install is an 'inplace build'
        msg = INPLACE_MSG
    # Format the directory listing three file names per line.
    dir_content = list()
    for i, filename in enumerate(os.listdir(local_dir)):
        if ((i + 1) % 3):
            dir_content.append(filename.ljust(26))
        else:
            dir_content.append(filename + '\n')
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.

If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
# Verify at import time that the compiled extension is importable; if it
# is not, replace the terse ImportError with a detailed diagnostic.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/tri/triinterpolate.py | 8 | 66410 | """
Interpolation inside triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
from matplotlib.tri import Triangulation
from matplotlib.tri.trifinder import TriFinder
from matplotlib.tri.tritools import TriAnalyzer
import numpy as np
import warnings
__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
class TriInterpolator(object):
    """
    Abstract base class for classes used to perform interpolation on
    triangular grids.

    Derived classes implement the following methods:

        - ``__call__(x, y)`` ,
          where x, y are array_like point coordinates of the same shape, and
          that returns a masked array of the same shape containing the
          interpolated z-values.

        - ``gradient(x, y)`` ,
          where x, y are array_like point coordinates of the same
          shape, and that returns a list of 2 masked arrays of the same shape
          containing the 2 derivatives of the interpolator (derivatives of
          interpolated z values with respect to x and y).

    """
    def __init__(self, triangulation, z, trifinder=None):
        if not isinstance(triangulation, Triangulation):
            raise ValueError("Expected a Triangulation object")
        self._triangulation = triangulation

        self._z = np.asarray(z)
        if self._z.shape != self._triangulation.x.shape:
            raise ValueError("z array must have same length as triangulation x"
                             " and y arrays")

        if trifinder is not None and not isinstance(trifinder, TriFinder):
            raise ValueError("Expected a TriFinder object")
        self._trifinder = trifinder or self._triangulation.get_trifinder()

        # Default scaling factors : 1.0 (= no scaling)
        # Scaling may be used for interpolations for which the order of
        # magnitude of x, y has an impact on the interpolant definition.
        # Please refer to :meth:`_interpolate_multikeys` for details.
        self._unit_x = 1.0
        self._unit_y = 1.0

        # Default triangle renumbering: None (= no renumbering)
        # Renumbering may be used to avoid unnecessary computations
        # if complex calculations are done inside the Interpolator.
        # Please refer to :meth:`_interpolate_multikeys` for details.
        self._tri_renum = None

    # __call__ and gradient docstrings are shared by all subclasses
    # (except, if needed, relevant additions).
    # However these methods are only implemented in subclasses to avoid
    # confusion in the documentation.
    docstring__call__ = """
        Returns a masked array containing interpolated values at the specified
        x,y points.

        Parameters
        ----------
        x, y : array-like
            x and y coordinates of the same shape and any number of
            dimensions.

        Returns
        -------
        z : np.ma.array
            Masked array of the same shape as *x* and *y* ; values
            corresponding to (*x*, *y*) points outside of the triangulation
            are masked out.

        """

    docstringgradient = """
        Returns a list of 2 masked arrays containing interpolated derivatives
        at the specified x,y points.

        Parameters
        ----------
        x, y : array-like
            x and y coordinates of the same shape and any number of
            dimensions.

        Returns
        -------
        dzdx, dzdy : np.ma.array
            2 masked arrays of the same shape as *x* and *y* ; values
            corresponding to (x,y) points outside of the triangulation
            are masked out.
            The first returned array contains the values of
            :math:`\\frac{\\partial z}{\\partial x}` and the second those of
            :math:`\\frac{\\partial z}{\\partial y}`.

        """

    def _interpolate_multikeys(self, x, y, tri_index=None,
                               return_keys=('z',)):
        """
        Versatile (private) method defined for all TriInterpolators.

        :meth:`_interpolate_multikeys` is a wrapper around method
        :meth:`_interpolate_single_key` (to be defined in the child
        subclasses).
        :meth:`_interpolate_single_key` actually performs the interpolation,
        but only for 1-dimensional inputs and at valid locations (inside
        unmasked triangles of the triangulation).

        The purpose of :meth:`_interpolate_multikeys` is to implement the
        following common tasks needed in all subclasses implementations:

            - calculation of containing triangles
            - dealing with more than one interpolation request at the same
              location (e.g., if the 2 derivatives are requested, it is
              unnecessary to compute the containing triangles twice)
            - scaling according to self._unit_x, self._unit_y
            - dealing with points outside of the grid (with fill value np.nan)
            - dealing with multi-dimensional *x*, *y* arrays: flattening for
              :meth:`_interpolate_params` call and final reshaping.

        (Note that np.vectorize could do most of those things very well for
        you, but it does it by function evaluations over successive tuples of
        the input arrays. Therefore, this tends to be more time consuming than
        using optimized numpy functions - e.g., np.dot - which can be used
        easily on the flattened inputs, in the child-subclass methods
        :meth:`_interpolate_single_key`.)

        It is guaranteed that the calls to :meth:`_interpolate_single_key`
        will be done with flattened (1-d) array_like input parameters `x`, `y`
        and with flattened, valid `tri_index` arrays (no -1 index allowed).

        Parameters
        ----------
        x, y : array_like
            x and y coordinates indicating where interpolated values are
            requested.
        tri_index : integer array_like, optional
            Array of the containing triangle indices, same shape as
            *x* and *y*. Defaults to None. If None, these indices
            will be computed by a TriFinder instance.
            (Note: For point outside the grid, tri_index[ipt] shall be -1).
        return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
            Defines the interpolation arrays to return, and in which order.

        Returns
        -------
        ret : list of arrays
            Each array-like contains the expected interpolated values in the
            order defined by *return_keys* parameter.
        """
        # Flattening and rescaling inputs arrays x, y
        # (initial shape is stored for output)
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        sh_ret = x.shape
        if (x.shape != y.shape):
            raise ValueError("x and y shall have same shapes."
                             " Given: {0} and {1}".format(x.shape, y.shape))
        x = np.ravel(x)
        y = np.ravel(y)
        x_scaled = x/self._unit_x
        y_scaled = y/self._unit_y
        size_ret = np.size(x_scaled)

        # Computes & ravels the element indexes, extract the valid ones.
        if tri_index is None:
            tri_index = self._trifinder(x, y)
        else:
            if (tri_index.shape != sh_ret):
                raise ValueError(
                    "tri_index array is provided and shall"
                    " have same shape as x and y. Given: "
                    "{0} and {1}".format(tri_index.shape, sh_ret))
            tri_index = np.ravel(tri_index)

        mask_in = (tri_index != -1)
        if self._tri_renum is None:
            valid_tri_index = tri_index[mask_in]
        else:
            valid_tri_index = self._tri_renum[tri_index[mask_in]]
        valid_x = x_scaled[mask_in]
        valid_y = y_scaled[mask_in]

        ret = []
        for return_key in return_keys:
            # Find the return index associated with the key.
            try:
                return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
            except KeyError:
                raise ValueError("return_keys items shall take values in"
                                 " {'z', 'dzdx', 'dzdy'}")

            # Sets the scale factor for f & df components
            scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]

            # Computes the interpolation
            ret_loc = np.empty(size_ret, dtype=np.float64)
            ret_loc[~mask_in] = np.nan
            ret_loc[mask_in] = self._interpolate_single_key(
                return_key, valid_tri_index, valid_x, valid_y) * scale
            ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]

        return ret

    def _interpolate_single_key(self, return_key, tri_index, x, y):
        """
        Performs the interpolation at points belonging to the triangulation
        (inside an unmasked triangles).

        Parameters
        ----------
        return_key : string key from {'z', 'dzdx', 'dzdy'}
            Identifies the requested values (z or its derivatives)
        tri_index : 1d integer array
            Valid triangle index (-1 prohibited)
        x, y : 1d arrays, same shape as `tri_index`
            Valid locations where interpolation is requested.

        Returns
        -------
        ret : 1-d array
            Returned array of the same size as *tri_index*
        """
        # BUG FIX: the two concatenated string literals were previously
        # joined without a space, yielding "subclassesshould" in the
        # error message.
        raise NotImplementedError("TriInterpolator subclasses should "
                                  "implement _interpolate_single_key!")
class LinearTriInterpolator(TriInterpolator):
    """
    Piecewise-linear interpolation on a triangular grid.

    Each triangle of the grid carries the plane passing through the z
    values at its three vertices; the interpolated value at a point (x, y)
    is taken on the plane of the triangle containing (x, y).  The resulting
    field is continuous across the triangulation, but its first derivatives
    jump at edges between triangles.

    Parameters
    ----------
    triangulation : :class:`~matplotlib.tri.Triangulation` object
        The triangulation to interpolate over.
    z : array_like of shape (npoints,)
        Array of values, defined at grid points, to interpolate between.
    trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
        If this is not specified, the Triangulation's default TriFinder will
        be used by calling
        :func:`matplotlib.tri.Triangulation.get_trifinder`.

    Methods
    -------
    `__call__` (x, y) : Returns interpolated values at x,y points
    `gradient` (x, y) : Returns interpolated derivatives at x,y points

    """
    def __init__(self, triangulation, z, trifinder=None):
        TriInterpolator.__init__(self, triangulation, z, trifinder)

        # Pre-compute the per-triangle plane coefficients once, so that
        # each interpolation query is a simple indexed evaluation.
        self._plane_coefficients = \
            self._triangulation.calculate_plane_coefficients(self._z)

    def __call__(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('z',))[0]
    __call__.__doc__ = TriInterpolator.docstring__call__

    def gradient(self, x, y):
        return self._interpolate_multikeys(x, y, tri_index=None,
                                           return_keys=('dzdx', 'dzdy'))
    gradient.__doc__ = TriInterpolator.docstringgradient

    def _interpolate_single_key(self, return_key, tri_index, x, y):
        coefs = self._plane_coefficients
        if return_key == 'dzdx':
            # d/dx of a plane a*x + b*y + c is the constant a.
            return coefs[tri_index, 0]
        if return_key == 'dzdy':
            return coefs[tri_index, 1]
        if return_key == 'z':
            a = coefs[tri_index, 0]
            b = coefs[tri_index, 1]
            c = coefs[tri_index, 2]
            return a * x + b * y + c
        raise ValueError("Invalid return_key: " + return_key)
class CubicTriInterpolator(TriInterpolator):
"""
A CubicTriInterpolator performs cubic interpolation on triangular grids.
In one-dimension - on a segment - a cubic interpolating function is
defined by the values of the function and its derivative at both ends.
This is almost the same in 2-d inside a triangle, except that the values
of the function and its 2 derivatives have to be defined at each triangle
node.
The CubicTriInterpolator takes the value of the function at each node -
provided by the user - and internally computes the value of the
derivatives, resulting in a smooth interpolation.
(As a special feature, the user can also impose the value of the
derivatives at each node, but this is not supposed to be the common
usage.)
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The triangulation to interpolate over.
z : array_like of shape (npoints,)
Array of values, defined at grid points, to interpolate between.
kind : {'min_E', 'geom', 'user'}, optional
Choice of the smoothing algorithm, in order to compute
the interpolant derivatives (defaults to 'min_E'):
- if 'min_E': (default) The derivatives at each node is computed
to minimize a bending energy.
- if 'geom': The derivatives at each node is computed as a
weighted average of relevant triangle normals. To be used for
speed optimization (large grids).
- if 'user': The user provides the argument `dz`, no computation
is hence needed.
trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
If not specified, the Triangulation's default TriFinder will
be used by calling
:func:`matplotlib.tri.Triangulation.get_trifinder`.
dz : tuple of array_likes (dzdx, dzdy), optional
Used only if *kind* ='user'. In this case *dz* must be provided as
(dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
are the interpolant first derivatives at the *triangulation* points.
Methods
-------
`__call__` (x, y) : Returns interpolated values at x,y points
`gradient` (x, y) : Returns interpolated derivatives at x,y points
Notes
-----
This note is a bit technical and details the way a
:class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
interpolation.
The interpolation is based on a Clough-Tocher subdivision scheme of
the *triangulation* mesh (to make it clearer, each triangle of the
grid will be divided in 3 child-triangles, and on each child triangle
the interpolated function is a cubic polynomial of the 2 coordinates).
This technique originates from FEM (Finite Element Method) analysis;
the element used is a reduced Hsieh-Clough-Tocher (HCT)
element. Its shape functions are described in [1]_.
The assembled function is guaranteed to be C1-smooth, i.e. it is
continuous and its first derivatives are also continuous (this
is easy to show inside the triangles but is also true when crossing the
edges).
In the default case (*kind* ='min_E'), the interpolant minimizes a
curvature energy on the functional space generated by the HCT element
shape functions - with imposed values but arbitrary derivatives at each
node. The minimized functional is the integral of the so-called total
curvature (implementation based on an algorithm from [2]_ - PCG sparse
solver):
.. math::
E(z) = \\ \\frac{1}{2} \\int_{\\Omega} \\left(
\\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
\\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
\\right)^2 \\right) dx\\,dy
If the case *kind* ='geom' is chosen by the user, a simple geometric
approximation is used (weighted average of the triangle normal
vectors), which could improve speed on very large grids.
References
----------
.. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
Hsieh-Clough-Tocher triangles, complete or reduced.",
International Journal for Numerical Methods in Engineering,
17(5):784 - 789. 2.01.
.. [2] C.T. Kelley, "Iterative Methods for Optimization".
"""
def __init__(self, triangulation, z, kind='min_E', trifinder=None,
             dz=None):
    # Builds the interpolator state: compressed triangulation, rescaled
    # nodal coordinates, triangle eccentricities and nodal dofs.
    TriInterpolator.__init__(self, triangulation, z, trifinder)
    # Loads the underlying c++ _triangulation.
    # (During loading, reordering of triangulation._triangles may occur so
    # that all final triangles are now anti-clockwise)
    self._triangulation.get_cpp_triangulation()
    # To build the stiffness matrix and avoid zero-energy spurious modes
    # we will only store internally the valid (unmasked) triangles and
    # the necessary (used) points coordinates.
    # 2 renumbering tables need to be computed and stored:
    # - a triangle renum table in order to translate the result from a
    #   TriFinder instance into the internal stored triangle number.
    # - a node renum table to overwrite the self._z values into the new
    #   (used) node numbering.
    tri_analyzer = TriAnalyzer(self._triangulation)
    (compressed_triangles, compressed_x, compressed_y, tri_renum,
     node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
    self._triangles = compressed_triangles
    self._tri_renum = tri_renum
    # Taking into account the node renumbering in self._z:
    # nodes flagged -1 in node_renum are unused; the kept z values are
    # first scattered to their new positions, then the unused entries
    # are dropped.
    node_mask = (node_renum == -1)
    self._z[node_renum[~node_mask]] = self._z
    self._z = self._z[~node_mask]
    # Computing scale factors so that each coordinate axis spans ~1 unit
    # (numerical conditioning of the downstream linear algebra).
    self._unit_x = np.max(compressed_x) - np.min(compressed_x)
    self._unit_y = np.max(compressed_y) - np.min(compressed_y)
    self._pts = np.vstack((compressed_x/float(self._unit_x),
                           compressed_y/float(self._unit_y))).T
    # Computing triangle points
    self._tris_pts = self._pts[self._triangles]
    # Computing eccentricities
    self._eccs = self._compute_tri_eccentricities(self._tris_pts)
    # Computing dof estimations for HCT triangle shape function
    self._dof = self._compute_dof(kind, dz=dz)
    # Loading HCT element
    self._ReferenceElement = _ReducedHCT_Element()
def __call__(self, x, y):
    # Only one key requested ('z'): unpack the single returned array.
    (z,) = self._interpolate_multikeys(
        x, y, tri_index=None, return_keys=('z',))
    return z
__call__.__doc__ = TriInterpolator.docstring__call__
def gradient(self, x, y):
    # Two keys requested: the derivative along x and along y.
    return self._interpolate_multikeys(
        x, y, tri_index=None, return_keys=('dzdx', 'dzdy'))
gradient.__doc__ = TriInterpolator.docstringgradient + """
Examples
--------
An example of effective application is shown below (plot of the
direction of the vector field derivated from a known potential field):
.. plot:: mpl_examples/pylab_examples/trigradient_demo.py
"""
def _interpolate_single_key(self, return_key, tri_index, x, y):
    """
    Interpolate a single quantity at (x, y): 'z' for the function
    values, 'dzdx'/'dzdy' for its first derivatives.
    """
    tris_pts = self._tris_pts[tri_index]
    alpha = self._get_alpha_vec(x, y, tris_pts)
    ecc = self._eccs[tri_index]
    dof = np.expand_dims(self._dof[tri_index], axis=1)
    if return_key == 'z':
        return self._ReferenceElement.get_function_values(alpha, ecc, dof)
    if return_key not in ('dzdx', 'dzdy'):
        raise ValueError("Invalid return_key: " + return_key)
    J = self._get_jacobian(tris_pts)
    derivs = self._ReferenceElement.get_function_derivatives(
        alpha, J, ecc, dof)
    # Column 0 of the returned matrices holds d/dx, column 1 holds d/dy.
    component = 0 if return_key == 'dzdx' else 1
    return derivs[:, component, 0]
def _compute_dof(self, kind, dz=None):
    """
    Compute and return nodal dofs according to *kind*.

    Parameters
    ----------
    kind : {'min_E', 'geom', 'user'}
        Choice of the _DOF_estimator subclass performing the gradient
        estimation.
    dz : tuple of array_likes (dzdx, dzdy), optional
        Only used if *kind* ='user'; in this case passed to the
        :class:`_DOF_estimator_user`.

    Returns
    -------
    dof : array_like, shape (npts, 2)
        Estimation of the gradient at triangulation nodes (stored as
        degrees of freedom of reduced-HCT triangle elements).
    """
    if kind == 'user':
        if dz is None:
            raise ValueError("For a CubicTriInterpolator with "
                             "*kind*='user', a valid *dz* "
                             "argument is expected.")
        estimator = _DOF_estimator_user(self, dz=dz)
    elif kind == 'geom':
        estimator = _DOF_estimator_geom(self)
    elif kind == 'min_E':
        estimator = _DOF_estimator_min_E(self)
    else:
        raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; "
                         "should be one of: "
                         "'user', 'geom', 'min_E'".format(kind))
    return estimator.compute_dof_from_df()
@staticmethod
def _get_alpha_vec(x, y, tris_pts):
    """
    Fast (vectorized) function to compute barycentric coordinates alpha.

    Parameters
    ----------
    x, y : array-like of dim 1 (shape (nx,))
        Coordinates of the points whose points barycentric
        coordinates are requested
    tris_pts : array like of dim 3 (shape: (nx,3,2))
        Coordinates of the containing triangles apexes.

    Returns
    -------
    alpha : array of dim 2 (shape (nx,3))
        Barycentric coordinates of the points inside the containing
        triangles.
    """
    # Number of leading axes before the (apex, xy) axes; equals 1 for
    # the documented (nx, 3, 2) input.
    ndim = tris_pts.ndim-2
    # Edge vectors from apex 0 towards apex 1 (a) and apex 2 (b).
    a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
    b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
    abT = np.concatenate([np.expand_dims(a, ndim+1),
                          np.expand_dims(b, ndim+1)], ndim+1)
    ab = _transpose_vectorized(abT)
    x = np.expand_dims(x, ndim)
    y = np.expand_dims(y, ndim)
    # Vector from apex 0 to each query point.
    OM = np.concatenate([x, y], ndim) - tris_pts[:, 0, :]
    metric = _prod_vectorized(ab, abT)
    # Here we try to deal with the colinear cases.
    # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
    # meaning that we will still return a set of valid barycentric
    # coordinates.
    metric_inv = _pseudo_inv22sym_vectorized(metric)
    Covar = _prod_vectorized(ab, _transpose_vectorized(
        np.expand_dims(OM, ndim)))
    ksi = _prod_vectorized(metric_inv, Covar)
    # The first barycentric coordinate is deduced so the three sum to 1.
    alpha = _to_matrix_vectorized([
        [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
    return alpha
@staticmethod
def _get_jacobian(tris_pts):
    """
    Fast (vectorized) computation of triangle jacobian matrices.

    Parameters
    ----------
    tris_pts : array-like of shape (nx, 3, 2)
        Coordinates of the containing triangles apexes.

    Returns
    -------
    J : array of shape (nx, 2, 2)
        J[itri, :, :] is the jacobian matrix at apex 0 of the triangle
        itri, so that the following (matrix) relationship holds:
           [dz/dksi] = [J] x [dz/dx]
        with x: global coordinates;
        ksi: element parametric coordinates in triangle first apex
        local basis.
    """
    # Edge vectors from apex 0 towards apexes 1 and 2 form the rows of J.
    edge01 = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
    edge02 = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
    return _to_matrix_vectorized([[edge01[:, 0], edge01[:, 1]],
                                  [edge02[:, 0], edge02[:, 1]]])
@staticmethod
def _compute_tri_eccentricities(tris_pts):
    """
    Computes triangle eccentricities.

    Parameters
    ----------
    tris_pts : array like of dim 3 (shape: (nx,3,2))
        Coordinates of the triangles apexes.

    Returns
    -------
    ecc : array like of dim 2 (shape: (nx,3))
        The so-called eccentricity parameters [1] needed for
        HCT triangular element.
    """
    # Edge vectors, kept as (nx, 2, 1) column-matrices for the
    # vectorized matrix products below.
    a = np.expand_dims(tris_pts[:, 2, :]-tris_pts[:, 1, :], axis=2)
    b = np.expand_dims(tris_pts[:, 0, :]-tris_pts[:, 2, :], axis=2)
    c = np.expand_dims(tris_pts[:, 1, :]-tris_pts[:, 0, :], axis=2)
    # Do not use np.squeeze, this is dangerous if only one triangle
    # in the triangulation...
    dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
    dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
    dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
    # Note that this line will raise a warning for dot_a, dot_b or dot_c
    # zeros, but we choose not to support triangles with duplicate points.
    return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
                                  [(dot_a-dot_c) / dot_b],
                                  [(dot_b-dot_a) / dot_c]])
# FEM element used for interpolation and for solving minimisation
# problem (Reduced HCT element)
class _ReducedHCT_Element():
    """
    Implementation of reduced HCT triangular element with explicit shape
    functions.

    Computes z, dz, d2z and the element stiffness matrix for bending energy:
    E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)

    *** Reference for the shape functions: ***
    [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or
        reduced.
        Michel Bernadou, Kamal Hassan
        International Journal for Numerical Methods in Engineering.
        17(5):784 - 789. 2.01

    *** Element description: ***
    9 dofs: z and dz given at 3 apex
    C1 (conform)
    """
    # 1) Loads matrices to generate shape functions as a function of
    #    triangle eccentricities - based on [1] p.11
    M = np.array([
        [ 0.00, 0.00, 0.00,  4.50,  4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00,  0.50,  1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00,  1.25,  0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.50, 1.00, 0.00, -1.50,  0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
        [ 0.00, 0.00, 0.00, -0.25,  0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
        [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
        [ 0.50, 0.00, 1.00,  0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
        [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
        [ 0.00, 0.00, 0.00,  0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
    M0 = np.array([
        [ 0.00, 0.00, 0.00,  0.00,  0.00, 0.00, 0.00, 0.00, 0.00,  0.00],
        [ 0.00, 0.00, 0.00,  0.00,  0.00, 0.00, 0.00, 0.00, 0.00,  0.00],
        [ 0.00, 0.00, 0.00,  0.00,  0.00, 0.00, 0.00, 0.00, 0.00,  0.00],
        [-1.00, 0.00, 0.00,  1.50,  1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
        [-0.50, 0.00, 0.00,  0.75,  0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
        [ 0.00, 0.00, 0.00,  0.00,  0.00, 0.00, 0.00, 0.00, 0.00,  0.00],
        [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00,  3.00],
        [ 0.00, 0.00, 0.00,  0.00,  0.00, 0.00, 0.00, 0.00, 0.00,  0.00],
        [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00,  1.50]])
    M1 = np.array([
        [-0.50, 0.00, 0.00,  1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00,  0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
    M2 = np.array([
        [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.50, 0.00, 0.00, 0.00,  1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [-0.25, 0.00, 0.00, 0.00,  0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
        [ 0.00, 0.00, 0.00, 0.00,  0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
    # 2) Loads matrices to rotate components of gradient & Hessian
    #    vectors in the reference basis of triangle first apex (a0)
    rotate_dV = np.array([[ 1.,  0.], [ 0.,  1.],
                          [ 0.,  1.], [-1., -1.],
                          [-1., -1.], [ 1.,  0.]])
    rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0.,  0.,  1.],
                           [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
                           [1., 1., 1.], [1., 0., 0.], [-2.,  0., -1.]])
    # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2
    #    exact integral - 3 points on each subtriangles.
    # NOTE: as the 2nd derivative is discontinuous , we really need those 9
    # points!
    n_gauss = 9
    gauss_pts = np.array([[13./18.,  4./18.,  1./18.],
                          [ 4./18., 13./18.,  1./18.],
                          [ 7./18.,  7./18.,  4./18.],
                          [ 1./18., 13./18.,  4./18.],
                          [ 1./18.,  4./18., 13./18.],
                          [ 4./18.,  7./18.,  7./18.],
                          [ 4./18.,  1./18., 13./18.],
                          [13./18.,  1./18.,  4./18.],
                          [ 7./18.,  4./18.,  7./18.]], dtype=np.float64)
    gauss_w = np.ones([9], dtype=np.float64) / 9.
    # 4) Stiffness matrix for curvature energy
    E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
    # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
    J0_to_J1 = np.array([[-1.,  1.], [-1.,  0.]])
    J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])

    def get_function_values(self, alpha, ecc, dofs):
        """
        Parameters
        ----------
        alpha : is a (N x 3 x 1) array (array of column-matrices) of
        barycentric coordinates,
        ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities,
        dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
        degrees of freedom.

        Returns
        -------
        Returns the N-array of interpolated function values.
        """
        # Sub-triangle index for each point, taken from the smallest
        # barycentric coordinate; coordinates and eccentricities are then
        # rolled so that the sub-triangle becomes the reference one.
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        x_sq = x*x
        y_sq = y*y
        z_sq = z*z
        # Cubic monomial basis evaluated at the (rolled) barycentric coords.
        V = _to_matrix_vectorized([
            [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
            [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
        # Shape functions: eccentricity-dependent combination of M-matrices.
        prod = _prod_vectorized(self.M, V)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, V))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, V))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, V))
        # Roll back to the original dof ordering before applying the dofs.
        s = _roll_vectorized(prod, 3*subtri, axis=0)
        return _prod_vectorized(dofs, s)[:, 0, 0]

    def get_function_derivatives(self, alpha, J, ecc, dofs):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices of
        barycentric coordinates)
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
        eccentricities)
        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
        degrees of freedom.

        Returns
        -------
        Returns the values of interpolated function derivatives [dz/dx, dz/dy]
        in global coordinates at locations alpha, as a column-matrices of
        shape (N x 2 x 1).
        """
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        x_sq = x*x
        y_sq = y*y
        z_sq = z*z
        # Gradient of the cubic monomial basis w.r.t. the two free
        # barycentric coordinates.
        dV = _to_matrix_vectorized([
            [    -3.*x_sq,     -3.*x_sq],
            [     3.*y_sq,           0.],
            [          0.,      3.*z_sq],
            [     -2.*x*z, -2.*x*z+x_sq],
            [-2.*x*y+x_sq,      -2.*x*y],
            [ 2.*x*y-y_sq,        -y_sq],
            [      2.*y*z,         y_sq],
            [        z_sq,       2.*y*z],
            [       -z_sq,  2.*x*z-z_sq],
            [     x*z-y*z,      x*y-y*z]])
        # Puts back dV in first apex basis
        dV = _prod_vectorized(dV, _extract_submatrices(
            self.rotate_dV, subtri, block_size=2, axis=0))
        prod = _prod_vectorized(self.M, dV)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, dV))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, dV))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, dV))
        dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
        dfdksi = _prod_vectorized(dofs, dsdksi)
        # In global coordinates:
        # Here we try to deal with the simplest colinear cases, returning a
        # null matrix.
        J_inv = _safe_inv22_vectorized(J)
        dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
        return dfdx

    def get_function_hessians(self, alpha, J, ecc, dofs):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
        barycentric coordinates
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities
        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
        degrees of freedom.

        Returns
        -------
        Returns the values of interpolated function 2nd-derivatives
        [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
        as a column-matrices of shape (N x 3 x 1).
        """
        d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
        d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
        H_rot = self.get_Hrot_from_J(J)
        d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
        return _transpose_vectorized(d2fdx2)

    def get_d2Sidksij2(self, alpha, ecc):
        """
        Parameters
        ----------
        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
        barycentric coordinates
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities

        Returns
        -------
        Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions
        expressed in covariante coordinates in first apex basis.
        """
        subtri = np.argmin(alpha, axis=1)[:, 0]
        ksi = _roll_vectorized(alpha, -subtri, axis=0)
        E = _roll_vectorized(ecc, -subtri, axis=0)
        x = ksi[:, 0, 0]
        y = ksi[:, 1, 0]
        z = ksi[:, 2, 0]
        # Hessian of the cubic monomial basis (d2/dksi0^2, d2/dksi1^2,
        # d2/dksi0 dksi1 columns).
        d2V = _to_matrix_vectorized([
            [     6.*x,      6.*x,      6.*x],
            [     6.*y,        0.,        0.],
            [       0.,      6.*z,        0.],
            [     2.*z, 2.*z-4.*x, 2.*z-2.*x],
            [2.*y-4.*x,      2.*y, 2.*y-2.*x],
            [2.*x-4.*y,        0.,     -2.*y],
            [     2.*z,        0.,      2.*y],
            [       0.,      2.*y,      2.*z],
            [       0., 2.*x-4.*z,     -2.*z],
            [    -2.*z,     -2.*y,     x-y-z]])
        # Puts back d2V in first apex basis
        d2V = _prod_vectorized(d2V, _extract_submatrices(
            self.rotate_d2V, subtri, block_size=3, axis=0))
        prod = _prod_vectorized(self.M, d2V)
        prod += _scalar_vectorized(E[:, 0, 0],
                                   _prod_vectorized(self.M0, d2V))
        prod += _scalar_vectorized(E[:, 1, 0],
                                   _prod_vectorized(self.M1, d2V))
        prod += _scalar_vectorized(E[:, 2, 0],
                                   _prod_vectorized(self.M2, d2V))
        d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
        return d2sdksi2

    def get_bending_matrices(self, J, ecc):
        """
        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities

        Returns
        -------
        Returns the element K matrices for bending energy expressed in
        GLOBAL nodal coordinates.
        K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
        tri_J is needed to rotate dofs from local basis to global basis
        """
        n = np.size(ecc, 0)

        # 1) matrix to rotate dofs in global coordinates
        J1 = _prod_vectorized(self.J0_to_J1, J)
        J2 = _prod_vectorized(self.J0_to_J2, J)
        DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
        DOF_rot[:, 0, 0] = 1
        DOF_rot[:, 3, 3] = 1
        DOF_rot[:, 6, 6] = 1
        DOF_rot[:, 1:3, 1:3] = J
        DOF_rot[:, 4:6, 4:6] = J1
        DOF_rot[:, 7:9, 7:9] = J2

        # 2) matrix to rotate Hessian in global coordinates.
        H_rot, area = self.get_Hrot_from_J(J, return_area=True)

        # 3) Computes stiffness matrix
        # Gauss quadrature.
        K = np.zeros([n, 9, 9], dtype=np.float64)
        weights = self.gauss_w
        pts = self.gauss_pts
        for igauss in range(self.n_gauss):
            alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
            # NOTE(review): axis=3 exceeds alpha.ndim (2); older numpy
            # clipped this to a trailing axis, producing the intended
            # (n, 3, 1) column-matrices - confirm against the numpy
            # version in use (np.expand_dims(alpha, 2) is the intended
            # shape).
            alpha = np.expand_dims(alpha, 3)
            weight = weights[igauss]
            d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
            d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
            K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
                                           _transpose_vectorized(d2Skdx2))

        # 4) With nodal (not elem) dofs
        K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
                                              K), DOF_rot)

        # 5) Need the area to compute total element energy
        return _scalar_vectorized(area, K)

    def get_Hrot_from_J(self, J, return_area=False):
        """
        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)

        Returns
        -------
        Returns H_rot used to rotate Hessian from local basis of first apex,
        to global coordinates.
        if *return_area* is True, returns also the triangle area (0.5*det(J))
        """
        # Here we try to deal with the simplest colinear cases ; a null
        # energy and area is imposed.
        J_inv = _safe_inv22_vectorized(J)
        Ji00 = J_inv[:, 0, 0]
        Ji11 = J_inv[:, 1, 1]
        Ji10 = J_inv[:, 1, 0]
        Ji01 = J_inv[:, 0, 1]
        H_rot = _to_matrix_vectorized([
            [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
            [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
            [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
        if not return_area:
            return H_rot
        else:
            area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
            return H_rot, area

    def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
        """
        Builds K and F for the following elliptic formulation:
        minimization of curvature energy with value of function at node
        imposed and derivatives 'free'.
        Builds the global Kff matrix in coo format.
        Builds the full Ff vec Ff = - Kfc x Uc

        Parameters
        ----------
        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
        triangle first apex)
        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
        eccentricities
        *triangles* is a (N x 3) array of nodes indexes.
        *Uc* is (N x 3) array of imposed displacements at nodes

        Returns
        -------
        (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
        (row, col) entries must be summed.
        Ff: force vector - dim npts * 3
        """
        ntri = np.size(ecc, 0)
        vec_range = np.arange(ntri, dtype=np.int32)
        c_indices = -np.ones(ntri, dtype=np.int32)  # for unused dofs, -1
        f_dof = [1, 2, 4, 5, 7, 8]
        c_dof = [0, 3, 6]

        # vals, rows and cols indices in global dof numbering
        f_dof_indices = _to_matrix_vectorized([[
            c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
            c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
            c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])

        expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
        f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
                                         _transpose_vectorized(expand_indices))
        f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
        K_elem = self.get_bending_matrices(J, ecc)

        # Extracting sub-matrices
        # Explanation & notations:
        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx)
        # * Subscript c denotes 'condensated' (imposed) degrees of freedom
        #   (i.e. z at all nodes)
        # * F = [Ff, Fc] is the force vector
        # * U = [Uf, Uc] is the imposed dof vector
        #        [ Kff Kfc ]
        # * K =  [         ]  is the laplacian stiffness matrix
        #        [ Kcf Kcc ]
        # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc

        # Computing Kff stiffness matrix in sparse coo format
        Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
        Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
        Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])

        # Computing Ff force vector in sparse coo format
        Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
        Uc_elem = np.expand_dims(Uc, axis=2)
        Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
        Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]

        # Extracting Ff force vector in dense format
        # We have to sum duplicate indices - using bincount
        Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
        return Kff_rows, Kff_cols, Kff_vals, Ff
# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
# _DOF_estimator_min_E
# Private classes used to compute the degree of freedom of each triangular
# element for the TriCubicInterpolator.
class _DOF_estimator():
    """
    Abstract base class for classes used to perform estimation of a function
    first derivatives, and deduce the dofs for a CubicTriInterpolator using a
    reduced HCT element formulation.
    Derived classes implement compute_dz(self, **kwargs), returning
    np.vstack([dfx, dfy]).T where: dfx, dfy are the estimation of the 2
    gradient coordinates.
    """
    def __init__(self, interpolator, **kwargs):
        if not isinstance(interpolator, CubicTriInterpolator):
            raise ValueError("Expected a CubicTriInterpolator object")
        # Local references to the interpolator's (compressed) geometry.
        self._pts = interpolator._pts
        self._tris_pts = interpolator._tris_pts
        self.z = interpolator._z
        self._triangles = interpolator._triangles
        (self._unit_x, self._unit_y) = (interpolator._unit_x,
                                        interpolator._unit_y)
        self.dz = self.compute_dz(**kwargs)
        # NOTE(review): the return value of this call is discarded; the
        # caller (CubicTriInterpolator._compute_dof) invokes
        # compute_dof_from_df() again to get the dofs, so the work is done
        # twice - confirm whether this first call is intentional.
        self.compute_dof_from_df()

    def compute_dz(self, **kwargs):
        # Abstract: gradient estimation implemented by derived classes.
        raise NotImplementedError

    def compute_dof_from_df(self):
        """
        Computes reduced-HCT elements degrees of freedom, knowing the
        gradient.
        """
        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
        tri_z = self.z[self._triangles]
        tri_dz = self.dz[self._triangles]
        tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
        return tri_dof

    @staticmethod
    def get_dof_vec(tri_z, tri_dz, J):
        """
        Computes the dof vector of a triangle, knowing the value of f, df and
        of the local Jacobian at each node.

        *tri_z*: array of shape (3,) of f nodal values
        *tri_dz*: array of shape (3,2) of df/dx, df/dy nodal values
        *J*: Jacobian matrix in local basis of apex 0

        Returns dof array of shape (9,) so that for each apex iapex:
            dof[iapex*3+0] = f(Ai)
            dof[iapex*3+1] = df(Ai).(AiAi+)
            dof[iapex*3+2] = df(Ai).(AiAi-)]
        """
        npt = tri_z.shape[0]
        dof = np.zeros([npt, 9], dtype=np.float64)
        # Jacobians at apexes 1 and 2, deduced from the apex-0 jacobian.
        J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
        J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
        # Gradients rotated into each apex's local basis.
        col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=3))
        col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=3))
        col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=3))
        dfdksi = _to_matrix_vectorized([
            [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
            [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
        # Interleaved layout: [z0, dz0a, dz0b, z1, dz1a, dz1b, z2, ...].
        dof[:, 0:7:3] = tri_z
        dof[:, 1:8:3] = dfdksi[:, 0]
        dof[:, 2:9:3] = dfdksi[:, 1]
        return dof
class _DOF_estimator_user(_DOF_estimator):
    """dz is imposed by user / Accounts for scaling if any."""
    def compute_dz(self, dz):
        # Rescale the user-provided derivatives into the internal
        # (unit-scaled) coordinate system.
        dzdx, dzdy = dz
        scaled = np.vstack([dzdx * self._unit_x,
                            dzdy * self._unit_y])
        return scaled.T
class _DOF_estimator_geom(_DOF_estimator):
    """Fast 'geometric' approximation, recommended for large arrays."""
    def compute_dz(self):
        """
        self.df is computed as weighted average of _triangles sharing a common
        node. On each triangle itri f is first assumed linear (= ~f), which
        allows to compute d~f[itri]
        Then the following approximation of df nodal values is then proposed:
            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
        The weighted coeff. w[itri] are proportional to the angle of the
        triangle itri at apex ipt
        """
        el_geom_w = self.compute_geom_weights()
        el_geom_grad = self.compute_geom_grads()

        # Sum of weights coeffs
        w_node_sum = np.bincount(np.ravel(self._triangles),
                                 weights=np.ravel(el_geom_w))

        # Sum of weighted df = (dfx, dfy)
        dfx_el_w = np.empty_like(el_geom_w)
        dfy_el_w = np.empty_like(el_geom_w)
        for iapex in range(3):
            dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
            dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
        dfx_node_sum = np.bincount(np.ravel(self._triangles),
                                   weights=np.ravel(dfx_el_w))
        dfy_node_sum = np.bincount(np.ravel(self._triangles),
                                   weights=np.ravel(dfy_el_w))

        # Estimation of df
        dfx_estim = dfx_node_sum/w_node_sum
        dfy_estim = dfy_node_sum/w_node_sum
        return np.vstack([dfx_estim, dfy_estim]).T

    def compute_geom_weights(self):
        """
        Builds the (nelems x 3) weights coeffs of _triangles angles,
        renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
        """
        weights = np.zeros([np.size(self._triangles, 0), 3])
        tris_pts = self._tris_pts
        for ipt in range(3):
            p0 = tris_pts[:, (ipt) % 3, :]
            p1 = tris_pts[:, (ipt+1) % 3, :]
            p2 = tris_pts[:, (ipt-1) % 3, :]
            alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
            alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
            # In the below formula we could take modulo 2. but
            # modulo 1. is safer regarding round-off errors (flat triangles).
            angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
            # Weight proportional to angle up np.pi/2 ; null weight for
            # degenerated cases 0. and np.pi (Note that `angle` is normalized
            # by np.pi)
            weights[:, ipt] = 0.5 - np.abs(angle-0.5)
        return weights

    def compute_geom_grads(self):
        """
        Compute the (global) gradient component of f assumed linear (~f).
        returns array df of shape (nelems,2)
        df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz
        """
        tris_pts = self._tris_pts
        tris_f = self.z[self._triangles]

        dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
        dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
        dM = np.dstack([dM1, dM2])
        # Here we try to deal with the simplest colinear cases: a null
        # gradient is assumed in this case.
        dM_inv = _safe_inv22_vectorized(dM)

        dZ1 = tris_f[:, 1] - tris_f[:, 0]
        dZ2 = tris_f[:, 2] - tris_f[:, 0]
        dZ = np.vstack([dZ1, dZ2]).T
        df = np.empty_like(dZ)

        # With np.einsum : could be ej,eji -> ej
        df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
        df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
        return df
class _DOF_estimator_min_E(_DOF_estimator_geom):
    """
    The 'smoothest' approximation, df is computed through global minimization
    of the bending energy:
        E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
    """
    def __init__(self, Interpolator):
        # Eccentricities are needed to build the HCT stiffness matrices.
        self._eccs = Interpolator._eccs
        _DOF_estimator_geom.__init__(self, Interpolator)

    def compute_dz(self):
        """
        Elliptic solver for bending energy minimization.
        Uses a dedicated 'toy' sparse Jacobi PCG solver.
        """
        # Initial guess for iterative PCG solver: the fast 'geom' estimate.
        dz_init = _DOF_estimator_geom.compute_dz(self)
        Uf0 = np.ravel(dz_init)

        reference_element = _ReducedHCT_Element()
        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
        eccs = self._eccs
        triangles = self._triangles
        Uc = self.z[self._triangles]

        # Building stiffness matrix and force vector in coo format
        Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
            J, eccs, triangles, Uc)

        # Building sparse matrix and solving minimization problem
        # We could use scipy.sparse direct solver ; however to avoid this
        # external dependency an implementation of a simple PCG solver with
        # a simple diagonal Jacobi preconditioner is implemented.
        tol = 1.e-10
        n_dof = Ff.shape[0]
        Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
                                     shape=(n_dof, n_dof))
        Kff_coo.compress_csc()
        Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)

        # If the PCG did not converge, we return the best guess between Uf0
        # and Uf, and warn the user.
        err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
        if err0 < err:
            warnings.warn("In TriCubicInterpolator initialization, PCG sparse"
                          " solver did not converge after 1000 iterations. "
                          "`geom` approximation is used instead of `min_E`")
            Uf = Uf0

        # Building dz from Uf (interleaved [dzdx, dzdy] per-node layout)
        dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
        dz[:, 0] = Uf[::2]
        dz[:, 1] = Uf[1::2]
        return dz
# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
# a PCG sparse solver for (symmetric) elliptic problems.
class _Sparse_Matrix_coo(object):
def __init__(self, vals, rows, cols, shape):
"""
Creates a sparse matrix in coo format
*vals*: arrays of values of non-null entries of the matrix
*rows*: int arrays of rows of non-null entries of the matrix
*cols*: int arrays of cols of non-null entries of the matrix
*shape*: 2-tuple (n,m) of matrix shape
"""
self.n, self.m = shape
self.vals = np.asarray(vals, dtype=np.float64)
self.rows = np.asarray(rows, dtype=np.int32)
self.cols = np.asarray(cols, dtype=np.int32)
def dot(self, V):
"""
Dot product of self by a vector *V* in sparse-dense to dense format
*V* dense vector of shape (self.m,)
"""
assert V.shape == (self.m,)
# For a more generic implementation we could use below kw argument
# minlength=self.m of bincount ; however:
# - it is new in numpy 1.6
# - it is unecessary when each row have at least 1 entry in global
# matrix, which is the case here.
return np.bincount(self.rows, weights=self.vals*V[self.cols])
def compress_csc(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csc format.
"""
_, unique, indices = np.unique(
self.rows + self.n*self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def compress_csr(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csr format.
"""
_, unique, indices = np.unique(
self.m*self.rows + self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def to_dense(self):
"""
Returns a dense matrix representing self.
Mainly for debugging purposes.
"""
ret = np.zeros([self.n, self.m], dtype=np.float64)
nvals = self.vals.size
for i in range(nvals):
ret[self.rows[i], self.cols[i]] += self.vals[i]
return ret
def __str__(self):
return self.to_dense().__str__()
@property
def diag(self):
"""
Returns the (dense) vector of the diagonal elements.
"""
in_diag = (self.rows == self.cols)
diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
diag[self.rows[in_diag]] = self.vals[in_diag]
return diag
def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
"""
Use Preconditioned Conjugate Gradient iteration to solve A x = b
A simple Jacobi (diagonal) preconditionner is used.
Parameters
----------
A: _Sparse_Matrix_coo
*A* must have been compressed before by compress_csc or
compress_csr method.
b: array
Right hand side of the linear system.
Returns
----------
x: array.
The converged solution.
err: float
The absolute error np.linalg.norm(A.dot(x) - b)
Other parameters
----------
x0: array.
Starting guess for the solution.
tol: float.
Tolerance to achieve. The algorithm terminates when the relative
residual is below tol.
maxiter: integer.
Maximum number of iterations. Iteration will stop
after maxiter steps even if the specified tolerance has not
been achieved.
"""
n = b.size
assert A.n == n
assert A.m == n
b_norm = np.linalg.norm(b)
# Jacobi pre-conditioner
kvec = A.diag
# For diag elem < 1e-6 we keep 1e-6.
kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)
# Initial guess
if x0 is None:
x = np.zeros(n)
else:
x = x0
r = b - A.dot(x)
w = r/kvec
p = np.zeros(n)
beta = 0.0
rho = np.dot(r, w)
k = 0
# Following C. T. Kelley
while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
p = w + beta*p
z = A.dot(p)
alpha = rho/np.dot(p, z)
r = r - alpha*z
w = r/kvec
rhoold = rho
rho = np.dot(r, w)
x = x + alpha*p
beta = rho/rhoold
#err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
k += 1
err = np.linalg.norm(A.dot(x) - b)
return x, err
# The following private functions:
# :func:`_inv22_vectorized`
# :func:`_safe_inv22_vectorized`
# :func:`_pseudo_inv22sym_vectorized`
# :func:`_prod_vectorized`
# :func:`_scalar_vectorized`
# :func:`_transpose_vectorized`
# :func:`_roll_vectorized`
# :func:`_to_matrix_vectorized`
# :func:`_extract_submatrices`
# provide fast numpy implementation of some standard operations on arrays of
# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
def _inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices.
"""
assert (M.ndim == 3)
assert (M.shape[-2:] == (2, 2))
M_inv = np.empty_like(M)
delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
# Development note: Dealing with pathologic 'flat' triangles in the
# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions
# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
#
# Goals:
# 1) The CubicTriInterpolator should be able to handle flat or almost flat
# triangles without raising an error,
# 2) These degenerated triangles should have no impact on the automatic dof
# calculation (associated with null weight for the _DOF_estimator_geom and
# with null energy for the _DOF_estimator_min_E),
# 3) Linear patch test should be passed exactly on degenerated meshes,
# 4) Interpolation (with :meth:`_interpolate_single_key` or
# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
# the pathologic triangles, to interact correctly with a TriRefiner class.
#
# Difficulties:
# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
# *metric* (the metric tensor = J x J.T). Computation of the local
# tangent plane is also problematic.
#
# Implementation:
# Most of the time, when computing the inverse of a rank-deficient matrix it
# is safe to simply return the null matrix (which is the implementation in
# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
# enforced by:
# - null area hence null energy in :class:`_DOF_estimator_min_E`
# - angles close or equal to 0 or np.pi hence null weight in
# :class:`_DOF_estimator_geom`.
# Note that the function angle -> weight is continuous and maximum for an
# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
# The exception is the computation of barycentric coordinates, which is done
# by inversion of the *metric* matrix. In this case, we need to compute a set
# of valid coordinates (1 among numerous possibilities), to ensure point 4).
# We benefit here from the symmetry of metric = J x J.T, which makes it easier
# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient
matrices.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
# We set delta_inv to 0. in case of a rank deficient matrix ; a
# rank-deficient input matrix *M* will lead to a null matrix in output
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
delta_inv = 1./delta
else:
# 'Pathologic' flow.
delta_inv = np.zeros(M.shape[0])
delta_inv[rank2] = 1./delta[rank2]
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
def _pseudo_inv22sym_vectorized(M):
    """
    Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the
    (Moore-Penrose) pseudo-inverse for rank-deficient matrices.
    In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
    projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
    In case M is of rank 0, we return the null matrix.
    *M* : array of (2,2) matrices to inverse, shape (n,2,2)
    """
    assert M.ndim == 3
    assert M.shape[-2:] == (2, 2)
    M_inv = np.empty_like(M)
    prod1 = M[:, 0, 0]*M[:, 1, 1]
    delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
    # rank2 is True where the determinant is significant relative to the
    # diagonal product, i.e. the matrix is safely invertible.
    rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
    if np.all(rank2):
        # Normal 'optimized' flow: cofactor formula for all matrices at once.
        M_inv[:, 0, 0] = M[:, 1, 1] / delta
        M_inv[:, 0, 1] = -M[:, 0, 1] / delta
        M_inv[:, 1, 0] = -M[:, 1, 0] / delta
        M_inv[:, 1, 1] = M[:, 0, 0] / delta
    else:
        # 'Pathologic' flow.
        # Here we have to deal with 2 sub-cases
        # 1) First sub-case: matrices of rank 2 are inverted normally
        #    (delta is restricted to the well-conditioned subset first):
        delta = delta[rank2]
        M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
        M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
        M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
        M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
        # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
        rank01 = ~rank2
        tr = M[rank01, 0, 0] + M[rank01, 1, 1]
        tr_zeros = (np.abs(tr) < 1.e-8)
        # (1-tr_zeros)/(tr**2+tr_zeros) equals 1/tr**2 for a non-null trace
        # (rank 1 case) and 0 for a null trace (rank 0 case), without ever
        # dividing by zero.
        sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
        #sq_tr_inv = 1. / tr**2
        # Rank-1 pseudo-inverse: M / trace(M)**2 (see docstring).
        M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
        M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
        M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
        M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
    return M_inv
def _prod_vectorized(M1, M2):
"""
Matrix product between arrays of matrices, or a matrix and an array of
matrices (*M1* and *M2*)
"""
sh1 = M1.shape
sh2 = M2.shape
assert len(sh1) >= 2
assert len(sh2) >= 2
assert sh1[-1] == sh2[-2]
ndim1 = len(sh1)
t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]
return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
M2[..., np.newaxis, :], -3)
def _scalar_vectorized(scalar, M):
"""
Scalar product between scalars and matrices.
"""
return scalar[:, np.newaxis, np.newaxis]*M
def _transpose_vectorized(M):
"""
Transposition of an array of matrices *M*.
"""
ndim = M.ndim
assert ndim == 3
return np.transpose(M, [0, ndim-1, ndim-2])
def _roll_vectorized(M, roll_indices, axis):
"""
Rolls an array of matrices along an axis according to an array of indices
*roll_indices*
*axis* can be either 0 (rolls rows) or 1 (rolls columns).
"""
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
elif axis == 1:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
def _to_matrix_vectorized(M):
"""
Builds an array of matrices from individuals np.arrays of identical
shapes.
*M*: ncols-list of nrows-lists of shape sh.
Returns M_res np.array of shape (sh, nrow, ncols) so that:
M_res[...,i,j] = M[i][j]
"""
assert isinstance(M, (tuple, list))
assert all([isinstance(item, (tuple, list)) for item in M])
c_vec = np.asarray([len(item) for item in M])
assert np.all(c_vec-c_vec[0] == 0)
r = len(M)
c = c_vec[0]
M00 = np.asarray(M[0][0])
dt = M00.dtype
sh = [M00.shape[0], r, c]
M_ret = np.empty(sh, dtype=dt)
for irow in range(r):
for icol in range(c):
M_ret[:, irow, icol] = np.asarray(M[irow][icol])
return M_ret
def _extract_submatrices(M, block_indices, block_size, axis):
"""
Extracts selected blocks of a matrices *M* depending on parameters
*block_indices* and *block_size*.
Returns the array of extracted matrices *Mres* so that:
M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
"""
assert block_indices.ndim == 1
assert axis in [0, 1]
r, c = M.shape
if axis == 0:
sh = [block_indices.shape[0], block_size, c]
elif axis == 1:
sh = [block_indices.shape[0], r, block_size]
dt = M.dtype
M_res = np.empty(sh, dtype=dt)
if axis == 0:
for ir in range(block_size):
M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
elif axis == 1:
for ic in range(block_size):
M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
return M_res
| mit |
cbertinato/pandas | pandas/tests/generic/test_label_or_level_utils.py | 1 | 10041 | import pytest
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
# Fixtures
# ========
@pytest.fixture
def df():
    """Base DataFrame fixture with plain columns 'L1', 'L2', and 'L3'."""
    return pd.DataFrame({'L1': [1, 2, 3],
                         'L2': [11, 12, 13],
                         'L3': ['A', 'B', 'C']})
@pytest.fixture(params=[[], ['L1'], ['L1', 'L2'], ['L1', 'L2', 'L3']])
def df_levels(request, df):
    """DataFrame with 'L1', 'L2', 'L3' split between columns and index levels.

    Parametrized over every prefix of the columns moved into the index,
    from none (flat frame) to all three (empty columns).
    """
    levels = request.param
    if levels:
        df = df.set_index(levels)
    return df
@pytest.fixture
def df_ambig(df):
    """DataFrame where 'L1' is BOTH an index level and a column label.

    Index levels: 'L1', 'L2'; columns: 'L3' plus a new 'L1' column (a copy
    of 'L3'), making 'L1' ambiguous between level and label.
    """
    df = df.set_index(['L1', 'L2'])
    df['L1'] = df['L3']
    return df
@pytest.fixture
def df_duplabels(df):
    """DataFrame with index level 'L1' and DUPLICATE column label 'L2'.

    Columns end up as ['L2', 'L3', 'L2'] (the second 'L2' is concatenated
    on), which makes label-based value lookup for 'L2' non-unique.
    """
    df = df.set_index(['L1'])
    df = pd.concat([df, df['L2']], axis=1)
    return df
# Test is label/level reference
# =============================
def get_labels_levels(df_levels):
    """Return (column labels, named index levels) of *df_levels*."""
    labels = list(df_levels.columns)
    levels = [lvl for lvl in df_levels.index.names if lvl is not None]
    return labels, levels
def assert_label_reference(frame, labels, axis):
    """Check each name in *labels* resolves as a label (never a level) on *axis*."""
    for name in labels:
        assert frame._is_label_reference(name, axis=axis)
        assert not frame._is_level_reference(name, axis=axis)
        assert frame._is_label_or_level_reference(name, axis=axis)
def assert_level_reference(frame, levels, axis):
    """Check each name in *levels* resolves as a level (never a label) on *axis*."""
    for name in levels:
        assert frame._is_level_reference(name, axis=axis)
        assert not frame._is_label_reference(name, axis=axis)
        assert frame._is_label_or_level_reference(name, axis=axis)
# DataFrame
# ---------
def test_is_level_or_label_reference_df_simple(df_levels, axis):
    """Names should be recognized as levels or labels per their placement."""
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_levels = df_levels.T
    # Perform checks
    assert_level_reference(df_levels, expected_levels, axis=axis)
    assert_label_reference(df_levels, expected_labels, axis=axis)
def test_is_level_reference_df_ambig(df_ambig, axis):
    """Ambiguous names must resolve to the label, not the level."""
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_ambig = df_ambig.T
    # df has both an on-axis level and off-axis label named L1
    # Therefore L1 should reference the label, not the level
    assert_label_reference(df_ambig, ['L1'], axis=axis)
    # df has an on-axis level named L2 and it is not ambiguous
    # Therefore L2 is a level reference
    assert_level_reference(df_ambig, ['L2'], axis=axis)
    # df has a column named L3, so L3 is a label (not a level) reference
    assert_label_reference(df_ambig, ['L3'], axis=axis)
# Series
# ------
def test_is_level_reference_series_simple_axis0(df):
    """Series index names are level references; its own name is not."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    assert_level_reference(s, ['L1'], axis=0)
    assert not s._is_level_reference('L2')
    # Make series with L1 and L2 as index
    s = df.set_index(['L1', 'L2']).L3
    assert_level_reference(s, ['L1', 'L2'], axis=0)
    assert not s._is_level_reference('L3')
def test_is_level_reference_series_axis1_error(df):
    """A Series has no axis 1; level lookup there must raise."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    with pytest.raises(ValueError, match="No axis named 1"):
        s._is_level_reference('L1', axis=1)
# Test _check_label_or_level_ambiguity_df
# =======================================
# DataFrame
# ---------
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
    """Only names present as both level and label should raise."""
    # Transpose frame if axis == 1
    if axis in {1, "columns"}:
        df_ambig = df_ambig.T
    if axis in {0, "index"}:
        msg = "'L1' is both an index level and a column label"
    else:
        msg = "'L1' is both a column level and an index label"
    # df_ambig has both an on-axis level and off-axis label named L1
    # Therefore, L1 is ambiguous.
    with pytest.raises(ValueError, match=msg):
        df_ambig._check_label_or_level_ambiguity("L1", axis=axis)
    # df_ambig has an on-axis level named L2, and it is not ambiguous.
    df_ambig._check_label_or_level_ambiguity("L2", axis=axis)
    # df_ambig has an off-axis label named L3, and it is not ambiguous
    # (the method returns None, so `assert not` holds when it doesn't raise)
    assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)
# Series
# ------
def test_check_label_or_level_ambiguity_series(df):
    """Series names can never be ambiguous (no columns), so nothing raises."""
    # A series has no columns and therefore references are never ambiguous
    # Make series with L1 as index
    s = df.set_index("L1").L2
    s._check_label_or_level_ambiguity("L1", axis=0)
    s._check_label_or_level_ambiguity("L2", axis=0)
    # Make series with L1 and L2 as index
    s = df.set_index(["L1", "L2"]).L3
    s._check_label_or_level_ambiguity("L1", axis=0)
    s._check_label_or_level_ambiguity("L2", axis=0)
    s._check_label_or_level_ambiguity("L3", axis=0)
def test_check_label_or_level_ambiguity_series_axis1_error(df):
    """Ambiguity check on a Series' nonexistent axis 1 must raise."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    with pytest.raises(ValueError, match="No axis named 1"):
        s._check_label_or_level_ambiguity('L1', axis=1)
# Test _get_label_or_level_values
# ===============================
def assert_label_values(frame, labels, axis):
    """Check _get_label_or_level_values matches direct label lookup on *axis*."""
    for name in labels:
        if axis in {0, 'index'}:
            expected = frame[name]._values
        else:
            expected = frame.loc[name]._values
        got = frame._get_label_or_level_values(name, axis=axis)
        assert array_equivalent(expected, got)
def assert_level_values(frame, levels, axis):
    """Check _get_label_or_level_values matches index/columns level values."""
    for name in levels:
        if axis in {0, "index"}:
            expected = frame.index.get_level_values(level=name)._values
        else:
            expected = frame.columns.get_level_values(level=name)._values
        got = frame._get_label_or_level_values(name, axis=axis)
        assert array_equivalent(expected, got)
# DataFrame
# ---------
def test_get_label_or_level_values_df_simple(df_levels, axis):
    """Value extraction should work for both labels and levels."""
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_levels = df_levels.T
    # Perform checks
    assert_label_values(df_levels, expected_labels, axis=axis)
    assert_level_values(df_levels, expected_levels, axis=axis)
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
    """Unambiguous names still resolve on a frame with an ambiguous 'L1'."""
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_ambig = df_ambig.T
    # df has an on-axis level named L2, and it is not ambiguous.
    assert_level_values(df_ambig, ['L2'], axis=axis)
    # df has an off-axis label named L3, and it is not ambiguous.
    assert_label_values(df_ambig, ['L3'], axis=axis)
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
    """Duplicate labels must raise; unique labels and levels still work."""
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_duplabels = df_duplabels.T
    # df has unambiguous level 'L1'
    assert_level_values(df_duplabels, ['L1'], axis=axis)
    # df has unique label 'L3'
    assert_label_values(df_duplabels, ['L3'], axis=axis)
    # df has duplicate labels 'L2'
    if axis in {0, 'index'}:
        expected_msg = "The column label 'L2' is not unique"
    else:
        expected_msg = "The index label 'L2' is not unique"
    with pytest.raises(ValueError, match=expected_msg):
        assert_label_values(df_duplabels, ['L2'], axis=axis)
# Series
# ------
def test_get_label_or_level_values_series_axis0(df):
    """Index level values are extractable from a Series on axis 0."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    assert_level_values(s, ['L1'], axis=0)
    # Make series with L1 and L2 as index
    s = df.set_index(['L1', 'L2']).L3
    assert_level_values(s, ['L1', 'L2'], axis=0)
def test_get_label_or_level_values_series_axis1_error(df):
    """Value extraction on a Series' nonexistent axis 1 must raise."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    with pytest.raises(ValueError, match="No axis named 1"):
        s._get_label_or_level_values('L1', axis=1)
# Test _drop_labels_or_levels
# ===========================
def assert_labels_dropped(frame, labels, axis):
    """Check _drop_labels_or_levels removes each label from *axis* only."""
    for name in labels:
        dropped = frame._drop_labels_or_levels(name, axis=axis)
        if axis in {0, 'index'}:
            assert name in frame.columns
            assert name not in dropped.columns
        else:
            assert name in frame.index
            assert name not in dropped.index
def assert_levels_dropped(frame, levels, axis):
    """Check _drop_labels_or_levels removes each index/columns level."""
    for name in levels:
        dropped = frame._drop_labels_or_levels(name, axis=axis)
        if axis in {0, 'index'}:
            assert name in frame.index.names
            assert name not in dropped.index.names
        else:
            assert name in frame.columns.names
            assert name not in dropped.columns.names
# DataFrame
# ---------
def test_drop_labels_or_levels_df(df_levels, axis):
    """Dropping valid labels/levels succeeds; an unknown name raises."""
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)
    # Transpose frame if axis == 1
    if axis in {1, 'columns'}:
        df_levels = df_levels.T
    # Perform checks
    assert_labels_dropped(df_levels, expected_labels, axis=axis)
    assert_levels_dropped(df_levels, expected_levels, axis=axis)
    with pytest.raises(ValueError, match="not valid labels or levels"):
        df_levels._drop_labels_or_levels('L4', axis=axis)
# Series
# ------
def test_drop_labels_or_levels_series(df):
    """Dropping index levels from a Series works; an unknown name raises."""
    # Make series with L1 as index
    s = df.set_index('L1').L2
    assert_levels_dropped(s, ['L1'], axis=0)
    with pytest.raises(ValueError, match="not valid labels or levels"):
        s._drop_labels_or_levels('L4', axis=0)
    # Make series with L1 and L2 as index
    s = df.set_index(['L1', 'L2']).L3
    assert_levels_dropped(s, ['L1', 'L2'], axis=0)
    with pytest.raises(ValueError, match="not valid labels or levels"):
        s._drop_labels_or_levels('L4', axis=0)
| bsd-3-clause |
lukashermann/pytorch-rl | core/env.py | 1 | 3187 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from copy import deepcopy
from gym.spaces.box import Box
import inspect
import cv2
from sklearn.utils.extmath import cartesian
from utils.helpers import Experience # NOTE: here state0 is always "None"
from utils.helpers import preprocessAtari, rgb2gray, rgb2y, scale, preprocessMujocoRgb, preprocessMujocoRgbd, preprocessMujocoRgbdLow
class Env(object):
    """Abstract base class for environment wrappers.

    Subclasses implement reset/step/render/visual and _preprocessState;
    this base handles seeding, optional frame saving / visdom setup, and
    the shared Experience record. *args* is a configuration namespace —
    this class reads: logger, mode, seed, visualize, vis, refs, env_type,
    game, root_dir. `self.env` is expected to be created by the subclass
    (action_dim reads it) — confirm against subclasses.
    """
    def __init__(self, args, env_ind=0):
        self.logger = args.logger
        self.ind = env_ind                # NOTE: for creating multiple environment instances
        # general setup
        self.mode = args.mode             # NOTE: save frames when mode=2
        if self.mode == 2:
            try:
                import scipy.misc
                self.imsave = scipy.misc.imsave
            except ImportError as e: self.logger.warning("WARNING: scipy.misc not found")
            self.img_dir = args.root_dir + "/imgs/"
            self.frame_ind = 0
        self.seed = args.seed + self.ind  # NOTE: so to give a different seed to each instance
        self.visualize = args.visualize
        if self.visualize:
            self.vis = args.vis
            self.refs = args.refs
            self.win_state1 = "win_state1"
        self.env_type = args.env_type
        self.game = args.game
        self._reset_experience()
        self.dof = None  # only used in mujoco env
        self.enable_mjc_dis = False
        self.logger.warning("<-----------------------------------> Env")
        self.logger.warning("Creating {" + self.env_type + " | " + self.game + "} w/ Seed: " + str(self.seed))
    def _reset_experience(self):
        # Clear the per-step experience fields before a new episode/step.
        self.exp_state0 = None  # NOTE: always None in this module
        self.exp_action = None
        self.exp_reward = None
        self.exp_state1 = None
        self.exp_terminal1 = None
    def _get_experience(self):
        # Package the current step into an Experience tuple; state1 goes
        # through the subclass-defined preprocessing.
        return Experience(state0 = self.exp_state0, # NOTE: here state0 is always None
                          action = self.exp_action,
                          reward = self.exp_reward,
                          state1 = self._preprocessState(self.exp_state1),
                          terminal1 = self.exp_terminal1)
    def _preprocessState(self, state):
        # NOTE(review): "calss" typo below is in the runtime message; left
        # unchanged here since it is program output, not a comment.
        raise NotImplementedError("not implemented in base calss")
    @property
    def state_shape(self):
        # Shape of the (preprocessed) observation; subclass-specific.
        raise NotImplementedError("not implemented in base calss")
    @property
    def action_dim(self):
        # Continuous (Box) spaces report their dimensionality; discrete
        # spaces report the number of actions.
        if isinstance(self.env.action_space, Box):
            return self.env.action_space.shape[0]
        else:
            return self.env.action_space.n
    def render(self):  # render using the original gl window
        raise NotImplementedError("not implemented in base calss")
    def visual(self):  # visualize onto visdom
        raise NotImplementedError("not implemented in base calss")
    def reset(self):
        raise NotImplementedError("not implemented in base calss")
    def step(self, action):
        raise NotImplementedError("not implemented in base calss")
| mit |
datapythonista/pandas | pandas/tests/tslibs/test_conversion.py | 4 | 3986 | from datetime import datetime
import numpy as np
import pytest
from pytz import UTC
from pandas._libs.tslibs import (
OutOfBoundsTimedelta,
conversion,
iNaT,
timezones,
tzconversion,
)
from pandas import (
Timestamp,
date_range,
)
import pandas._testing as tm
def _compare_utc_to_local(tz_didx):
    """Check vectorized tz_convert_from_utc matches the scalar version."""
    def f(x):
        return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz)
    result = tzconversion.tz_convert_from_utc(tz_didx.asi8, tz_didx.tz)
    expected = np.vectorize(f)(tz_didx.asi8)
    tm.assert_numpy_array_equal(result, expected)
def _compare_local_to_utc(tz_didx, naive_didx):
    # Check that tz_localize behaves the same vectorized and pointwise:
    # either both paths succeed with equal arrays, or both raise the same
    # exception type.
    err1 = err2 = None
    try:
        result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz)
        err1 = None  # redundant (already None) but kept for symmetry
    except Exception as err:
        err1 = err
    try:
        expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8
    except Exception as err:
        err2 = err
    if err1 is not None:
        assert type(err1) == type(err2)
    else:
        # `result` is only defined on this branch, where the first try
        # succeeded; the array comparison must stay inside the else.
        assert err2 is None
        tm.assert_numpy_array_equal(result, expected)
def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture):
    """Hourly range round-trips identically through scalar and vector paths."""
    tz = tz_aware_fixture
    tz_didx = date_range("2014-03-01", "2015-01-10", freq="H", tz=tz)
    naive_didx = date_range("2014-03-01", "2015-01-10", freq="H")
    _compare_utc_to_local(tz_didx)
    _compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize("freq", ["D", "A"])
def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):
    """Daily/annual ranges match between scalar and vectorized conversion."""
    tz = tz_aware_fixture
    tz_didx = date_range("2000-01-01", "2020-01-01", freq=freq, tz=tz)
    naive_didx = date_range("2000-01-01", "2020-01-01", freq=freq)
    _compare_utc_to_local(tz_didx)
    _compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize(
    "arr",
    [
        pytest.param(np.array([], dtype=np.int64), id="empty"),
        pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"),
    ],
)
def test_tz_convert_corner(arr):
    """Empty and all-NaT inputs pass through tz conversion unchanged."""
    result = tzconversion.tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))
    tm.assert_numpy_array_equal(result, arr)
def test_tz_convert_readonly():
    """Read-only input arrays must not break UTC conversion (GH#35530)."""
    # GH#35530
    arr = np.array([0], dtype=np.int64)
    arr.setflags(write=False)
    result = tzconversion.tz_convert_from_utc(arr, UTC)
    tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"])
def test_length_zero_copy(dtype, copy):
    """ensure_datetime64ns honors the copy flag even for empty arrays."""
    arr = np.array([], dtype=dtype)
    result = conversion.ensure_datetime64ns(arr, copy=copy)
    # copy=True -> fresh memory (no base); copy=False -> a view on arr.
    assert result.base is (None if copy else arr)
def test_ensure_datetime64ns_bigendian():
    """Big-endian datetime64 input converts correctly to ns (GH#29684)."""
    # GH#29684
    arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
    result = conversion.ensure_datetime64ns(arr)
    expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")
    tm.assert_numpy_array_equal(result, expected)
def test_ensure_timedelta64ns_overflows():
    """Year-unit timedeltas too large for ns raise OutOfBoundsTimedelta."""
    arr = np.arange(10).astype("m8[Y]") * 100
    msg = r"Out of bounds for nanosecond timedelta64\[Y\] 900"
    with pytest.raises(OutOfBoundsTimedelta, match=msg):
        conversion.ensure_timedelta64ns(arr)
class SubDatetime(datetime):
    """Minimal datetime subclass used to check subclass preservation."""
    pass
@pytest.mark.parametrize(
    "dt, expected",
    [
        pytest.param(
            Timestamp("2000-01-01"), Timestamp("2000-01-01", tz=UTC), id="timestamp"
        ),
        pytest.param(
            datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=UTC), id="datetime"
        ),
        pytest.param(
            SubDatetime(2000, 1, 1),
            SubDatetime(2000, 1, 1, tzinfo=UTC),
            id="subclassed_datetime",
        ),
    ],
)
def test_localize_pydatetime_dt_types(dt, expected):
    """localize_pydatetime preserves Timestamp/datetime/subclass types."""
    # GH 25851
    # ensure that subclassed datetime works with
    # localize_pydatetime
    result = conversion.localize_pydatetime(dt, UTC)
    assert result == expected
| bsd-3-clause |
ornlneutronimaging/ResoFit | ResoFit/calibration.py | 1 | 31136 | import ImagingReso._utilities as reso_util
import matplotlib.pyplot as plt
from lmfit import Parameters
from lmfit import minimize
import pandas as pd
from itertools import cycle
import pprint
import ResoFit._utilities as fit_util
from ResoFit._gap_functions import y_gap_for_calibration
# from ResoFit._gap_functions import y_gap_for_adv_calibration
from ResoFit.experiment import Experiment
from ResoFit.simulation import Simulation
_exp_time_offset_us = 5.2
class Calibration(object):
    def __init__(self,
                 # Initialize ResoFit.experiment
                 spectra_file: str,
                 data_file: str,
                 folder: str,
                 exp_source_to_detector_m, exp_offset_us,
                 baseline: bool,
                 baseline_deg: int,
                 # Initialize ResoFit.simulation
                 layer: fit_util.Layer,
                 energy_min, energy_max, energy_step,
                 database: str,
                 x_type: str,
                 y_type: str):
        """
        Initialize with experimental file locations and sample info.

        :param spectra_file: name of the spectra (time-of-flight) file
        :param data_file: name of the measured data file
        :param folder: directory containing both files
        :param exp_source_to_detector_m: initial source-to-detector distance (m)
        :param exp_offset_us: initial detector time offset (us)
        :param baseline: True -> remove baseline/background by detrend
        :param baseline_deg: polynomial degree used for baseline removal
        :param layer: sample description (fit_util.Layer)
        :param energy_min: lower bound of the simulated energy grid (eV)
        :param energy_max: upper bound of the simulated energy grid (eV)
        :param energy_step: step of the simulated energy grid (eV)
        :param database: cross-section database name for the simulation
        :param x_type: x-axis type used for peak work (e.g. 'energy')
        :param y_type: y-axis type used for peak work (e.g. 'attenuation')
        """
        self.x_type = x_type
        self.y_type = y_type
        self.energy_min = energy_min
        self.energy_max = energy_max
        self.energy_step = energy_step
        # Ideal (simulated) signal for the given sample layer.
        self.simulation = Simulation(energy_min=energy_min,
                                     energy_max=energy_max,
                                     energy_step=energy_step,
                                     database=database)
        self.simulation.add_Layer(layer=layer)
        # Measured signal, loaded with the *initial* instrument parameters.
        self.experiment = Experiment(spectra_file=spectra_file,
                                     data_file=data_file,
                                     folder=folder,
                                     source_to_detector_m=exp_source_to_detector_m,
                                     offset_us=exp_offset_us,
                                     baseline=baseline,
                                     baseline_deg=baseline_deg)
        # Shift the experiment start time by the fixed instrument delay.
        self.experiment.t_start_us = self.experiment.t_start_us + _exp_time_offset_us
        self.init_source_to_detector_m = exp_source_to_detector_m
        self.init_offset_us = exp_offset_us
        # Populated by calibrate().
        self.calibrated_offset_us = None
        self.calibrated_source_to_detector_m = None
        self.calibrate_result = None
        self.params_to_calibrate = None
    def calibrate(self, source_to_detector_m=None, offset_us=None, vary='all',
                  each_step=False):
        """
        Calibrate the instrumental parameters: source-to-detector distance
        and detector time delay, by least-squares matching of the measured
        signal against the simulated one.

        :param source_to_detector_m: estimated distance in m (defaults to the init value)
        :param offset_us: estimated time offset in us (defaults to the init value)
        :param vary: which parameter(s) to fit: 'source_to_detector', 'offset',
            'all' (default), or 'none' (skip fitting and keep the inputs)
        :param each_step: True -> show values and chi^2 of each step
        :raises ValueError: if *vary* is not one of the allowed options
        :return: lmfit MinimizerResult, or None when vary='none'
        """
        # Overwrite init values if input detected
        if source_to_detector_m is None:
            source_to_detector_m = self.init_source_to_detector_m
        if offset_us is None:
            offset_us = self.init_offset_us
        vary_type_list = ['source_to_detector', 'offset', 'all', 'none']
        if vary not in vary_type_list:
            raise ValueError("'vary=' can only be one of '{}'".format(vary_type_list))
        # Ideal reference curve the experiment will be fitted against.
        simu_x = self.simulation.get_x(x_type='energy', offset_us=offset_us, source_to_detector_m=source_to_detector_m)
        simu_y = self.simulation.get_y(y_type='attenuation')
        # Map the requested 'vary' mode onto per-parameter vary flags;
        # 'none' additionally disables the minimization entirely.
        _run = True
        if vary == 'all':
            source_to_detector_vary_tag = True
            offset_vary_tag = True
        elif vary == 'source_to_detector':
            source_to_detector_vary_tag = True
            offset_vary_tag = False
        elif vary == 'offset':
            source_to_detector_vary_tag = False
            offset_vary_tag = True
        else:  # vary == 'none'
            source_to_detector_vary_tag = False
            offset_vary_tag = False
            _run = False
        self.params_to_calibrate = Parameters()
        self.params_to_calibrate.add('source_to_detector_m',
                                     value=source_to_detector_m,
                                     vary=source_to_detector_vary_tag)
        self.params_to_calibrate.add('offset_us',
                                     value=offset_us,
                                     vary=offset_vary_tag)
        # Print before
        print("+----------------- Calibration -----------------+\nParams before:")
        self.params_to_calibrate.pretty_print()
        # Use lmfit to obtain 'source_to_detector_m' & 'offset_us' to minimize 'y_gap_for_calibration'
        if _run:
            self.calibrate_result = minimize(y_gap_for_calibration,
                                             self.params_to_calibrate,
                                             method='leastsq',
                                             args=(simu_x, simu_y,
                                                   self.energy_min, self.energy_max, self.energy_step,
                                                   self.experiment, 'energy', 'attenuation', each_step))
            # Print after
            print("\nParams after:")
            self.calibrate_result.__dict__['params'].pretty_print()
            # Print chi^2
            # self.calibrated_residual = self.calibrate_result.__dict__['residual']
            print("Calibration chi^2 : {}\n".format(self.calibrate_result.__dict__['chisqr']))
            self.calibrated_offset_us = self.calibrate_result.__dict__['params'].valuesdict()['offset_us']
            self.calibrated_source_to_detector_m = \
                self.calibrate_result.__dict__['params'].valuesdict()['source_to_detector_m']
            return self.calibrate_result
        else:
            # vary='none': store the inputs as the "calibrated" values so
            # downstream peak indexing can still proceed.
            self.calibrated_offset_us = offset_us
            self.calibrated_source_to_detector_m = source_to_detector_m
            print("\ncalibrate() was not run as requested, input values used:\n"
                  "calibrated_offset_us = {}\ncalibrated_source_to_detector_m = {}".format(offset_us,
                                                                                           source_to_detector_m))
        # self.experiment.xy_scaled(energy_min=self.energy_min,
        #                           energy_max=self.energy_max,
        #                           energy_step=self.energy_step,
        #                           x_type='energy',
        #                           y_type='attenuation',
        #                           offset_us=offset_us,
        #                           source_to_detector_m=source_to_detector_m,
        #                           )
    def __find_peak(self, thres, min_dist):
        """Detect peaks in the calibrated experimental signal.

        :param thres: peak-detection threshold passed to Experiment.find_peak
        :param min_dist: minimum distance between detected peaks
        :raises ValueError: if calibrate() has not populated the instrument params
        :return: the experiment's peak dictionary
        """
        # load detected peak with x in image number
        # if self.calibrate_result is None:
        if self.calibrated_source_to_detector_m is None or self.calibrated_offset_us is None:
            raise ValueError("Instrument params have not been calibrated.")
        self.experiment.find_peak(x_type=self.x_type, y_type=self.y_type,
                                  thres=thres, min_dist=min_dist)
        # self.experiment.o_peak._scale_peak_df(energy_min=self.energy_min, energy_max=self.energy_max,
        #                                       )
        return self.experiment.o_peak.peak_dict
    def index_peak(self, thres_exp, min_dist_exp, thres_map, min_dist_map, rel_tol, impr_reso=True):
        """Match detected experimental peaks against the simulated peak map.

        :param thres_exp: threshold for experimental peak detection
        :param min_dist_exp: minimum distance for experimental peak detection
        :param thres_map: threshold for the simulated peak map
        :param min_dist_map: minimum distance for the simulated peak map
        :param rel_tol: relative tolerance used when pairing exp/ideal peaks
        :param impr_reso: True -> use improved resolution for the peak map
        """
        if self.experiment.o_peak is None:
            self.__find_peak(thres=thres_exp, min_dist=min_dist_exp)
        # find peak map using Simulation.peak_map()
        # NOTE(review): `num_offset` uses `img_start` here but `slice_start`
        # elsewhere in this class -- confirm they are intended to differ.
        _peak_map_dict = self.simulation.peak_map(thres=thres_map, min_dist=min_dist_map, impr_reso=impr_reso,
                                                  x_type=self.x_type, y_type=self.y_type,
                                                  offset_us=self.calibrated_offset_us,
                                                  source_to_detector_m=self.calibrated_source_to_detector_m,
                                                  t_unit=self.experiment.t_unit,
                                                  t_start_us=self.experiment.t_start_us,
                                                  time_resolution_us=self.experiment.time_resolution_us,
                                                  num_offset=self.experiment.img_start)
        # pass peak map to Peak(); axis types must agree with the detection
        assert _peak_map_dict['x_type'] == self.experiment.o_peak.peak_dict['x_type']
        assert _peak_map_dict['y_type'] == self.experiment.o_peak.peak_dict['y_type']
        self.experiment.o_peak.peak_map_full = _peak_map_dict['peak_map']
        # index using Peak()
        self.experiment.o_peak.index_peak(_peak_map_dict, rel_tol=rel_tol)
        # return self.experiment.o_peak.peak_map_indexed
    def analyze_peak(self, fit_model, report=False, show_fit=False):
        """Fit the indexed peaks with *fit_model*.

        :param fit_model: peak model name passed to Peak.analyze
        :param report: True -> print the fit report
        :param show_fit: True -> plot the fitted peaks
        :raises AttributeError: if index_peak() has not been run first
        """
        if self.experiment.o_peak is None:
            raise AttributeError("Please run 'Calibration.index_peak()' before peak analysis.")
        self.experiment.o_peak.analyze(report=report, fit_model=fit_model)
        if show_fit:
            self.experiment.o_peak.plot_fit()
# def calibrate_peak_pos(self, thres=0.15, min_dist=2, vary='all', each_step=False):
# """
# calibrate the instrumental parameters: source-to-detector-distance & detector delay
# based on peak positions obtained from the instrument parameters after Calibration.calibrate().
#
# :param thres:
# :type thres:
# :param min_dist:
# :type min_dist:
# :param vary: vary one of or both of 'source_to_detector' and 'offset' to calibrate (default: 'all')
# :type vary:
# :param each_step: True -> show values and chi^2 of each step
# :type each_step: boolean.
# :return: calibration result
# :rtype: lmfit MinimizerResult
# """
# if self.peak_map_indexed is None:
# raise ValueError('Calibrate must be run before running advanced calibration.')
# # self.init_source_to_detector_m = source_to_detector_m
# # self.init_offset_us = offset_us
# if vary not in ['source_to_detector', 'offset', 'all', 'none']:
# raise ValueError("'vary=' can only be one of ['source_to_detector', 'offset', 'all' 'none']")
# ideal_x = []
# for _ele in self.peak_map_indexed.keys():
# ideal_x = ideal_x + list(self.peak_map_indexed[_ele]['ideal']['x'])
# sorted(ideal_x)
# print(ideal_x)
#
# source_to_detector_vary_tag = True
# offset_vary_tag = True
# if vary == 'source_to_detector':
# offset_vary_tag = False
# if vary == 'offset':
# source_to_detector_vary_tag = False
# if vary == 'none':
# source_to_detector_vary_tag = False
# offset_vary_tag = False
# self.params_to_calibrate = Parameters()
# self.params_to_calibrate.add('source_to_detector_m',
# value=self.calibrated_source_to_detector_m,
# vary=source_to_detector_vary_tag)
# self.params_to_calibrate.add('offset_us',
# value=self.calibrated_offset_us,
# vary=offset_vary_tag)
# # Print before
# print("-------Calibration(advanced)-------\nParams before:")
# self.params_to_calibrate.pretty_print()
# # Use lmfit to obtain 'source_to_detector_m' & 'offset_us' to minimize 'y_gap_for_calibration'
# self.calibrate_result = minimize(y_gap_for_adv_calibration,
# self.params_to_calibrate,
# method='leastsq',
# args=(ideal_x, thres, min_dist,
# self.experiment, each_step))
# # Print after
# print("Params after:")
# self.calibrate_result.__dict__['params'].pretty_print()
# # Print chi^2
# self.calibrated_residual = self.calibrate_result.__dict__['residual']
# print("Calibration chi^2 : {}\n".format(sum(self.calibrated_residual ** 2)))
# self.calibrated_offset_us = self.calibrate_result.__dict__['params'].valuesdict()['offset_us']
# self.calibrated_source_to_detector_m = \
# self.calibrate_result.__dict__['params'].valuesdict()['source_to_detector_m']
#
# # Save the calibrated experimental x & y in Calibration class
# self.exp_x_raw_calibrated = self.experiment.x_raw(angstrom=False,
# offset_us=self.calibrated_offset_us,
# source_to_detector_m=self.calibrated_source_to_detector_m)
# self.exp_y_raw_calibrated = self.experiment.y_raw(transmission=False, baseline=self.baseline)
#
# self.exp_x_interp_calibrated, self.exp_y_interp_calibrated = self.experiment.xy_scaled(
# energy_min=self.energy_min,
# energy_max=self.energy_max,
# energy_step=self.energy_step,
# offset_us=self.calibrated_offset_us,
# source_to_detector_m=self.calibrated_source_to_detector_m,
# baseline=self.baseline)
#
# return self.calibrate_result
def plot(self, x_type=None, y_type=None, t_unit='us',
         index_level='iso', peak_id='indexed', peak_exp='indexed',
         peak_height=True,
         before=False, interp=False, mixed=False,
         logx=True, logy=False, table=True, grid=True, save_fig=False):
    """Plot the calibration result: simulated signal vs. calibrated data.

    :param x_type: x-axis type; falls back to ``self.x_type`` when None
    :param y_type: y-axis type ('transmission' or 'attenuation');
        falls back to ``self.y_type`` when None
    :param t_unit: time unit used when plotting a time axis
    :param index_level: 'iso' to show isotope-level peaks ('-' in name),
        otherwise element-level peaks
    :param peak_id: 'all' or 'indexed' -- which ideal peaks to mark
    :param peak_exp: 'all' or 'indexed' -- which experimental peaks to mark
    :param peak_height: draw stems from the baseline up to each ideal peak
    :param before: additionally plot the data using the initial
        (pre-calibration) instrument parameters
    :param interp: plot the interpolated data instead of the raw points
    :param mixed: plot the simulated total signal
    :param logx: log-scale the x-axis
    :param logy: log-scale the y-axis
    :param table: append a before/after parameter table below the plot
    :param grid: show grid lines
    :param save_fig: also save the figure as 'calibration_<sample>.png'
    :return: the matplotlib Axes carrying the plot
    """
    fit_util.check_if_in_list(peak_id, fit_util.peak_type_list)
    fit_util.check_if_in_list(peak_exp, fit_util.peak_type_list)
    fit_util.check_if_in_list(index_level, fit_util.index_level_list)
    if x_type is None:
        x_type = self.x_type
    if y_type is None:
        y_type = self.y_type
    new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                  '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                  '#bcbd22', '#17becf']
    marker_styles = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X']
    color_cycle = cycle(new_colors)
    style_cycle = cycle(marker_styles)
    simu_label = 'Ideal'
    exp_label = 'Exp'
    exp_before_label = 'Exp_init'
    exp_interp_label = 'Exp_interp'
    sample_name = ' & '.join(self.simulation.layer_list)
    fig_title = "Calibration result of sample ('{}')".format(sample_name)
    # BUG FIX: 'fig = plt.Figure()' created a detached Figure object that
    # the pyplot calls below never drew into; 'plt.figure()' opens a new
    # pyplot-managed figure so the subplot calls land on a fresh canvas.
    plt.figure()
    if table:
        # plot table + graph
        ax1 = plt.subplot2grid(shape=(10, 10), loc=(0, 1), rowspan=8, colspan=8)
    else:
        # plot graph only
        ax1 = plt.subplot(111)
    # Plot simulated total signal
    if mixed:
        _x = self.simulation.get_x(x_type=x_type,
                                   t_unit=t_unit,
                                   offset_us=self.calibrated_offset_us,
                                   source_to_detector_m=self.calibrated_source_to_detector_m,
                                   t_start_us=self.experiment.t_start_us,
                                   time_resolution_us=self.experiment.time_resolution_us,
                                   num_offset=self.experiment.slice_start)
        _y = self.simulation.get_y(y_type=y_type)
        ax1.plot(_x, _y, 'b-', label=simu_label, linewidth=1)
    # Plot options
    # 1. Raw data before fitting
    if before:
        _x_init = self.experiment.get_x(x_type=x_type,
                                        t_unit=t_unit,
                                        offset_us=self.init_offset_us,
                                        source_to_detector_m=self.init_source_to_detector_m)
        _y_init = self.experiment.get_y(y_type=y_type)
        ax1.plot(_x_init,
                 _y_init,
                 linestyle='-', linewidth=1,
                 marker='o', markersize=2,
                 color='c', label=exp_before_label)
    # 2. Interpolated vs. raw calibrated data
    if interp:
        _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
            x_type=x_type,
            y_type=y_type,
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            energy_step=self.energy_step,
            t_unit=t_unit,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
        )
        # plot the interpolated raw data
        ax1.plot(_exp_x_interp_calibrated,
                 _exp_y_interp_calibrated,
                 'r:', label=exp_interp_label, linewidth=1)
    else:
        # plot the calibrated raw data
        _x_cali = self.experiment.get_x(x_type=x_type,
                                        t_unit=t_unit,
                                        offset_us=self.calibrated_offset_us,
                                        source_to_detector_m=self.calibrated_source_to_detector_m)
        _y_cali = self.experiment.get_y(y_type=y_type)
        ax1.plot(_x_cali,
                 _y_cali,
                 linestyle='-', linewidth=1,
                 marker='o', markersize=2,
                 color='r', label=exp_label)
        if peak_exp == 'all':
            # mark every detected experimental peak with a black cross
            _peak_df_scaled = self.experiment.o_peak.peak_dict['df']
            _peak_x_exp = _peak_df_scaled['x']
            _peak_y_exp = _peak_df_scaled['y']
            ax1.scatter(_peak_x_exp,
                        _peak_y_exp,
                        c='k',
                        marker='x',
                        label='_nolegend_')
    # plot peaks detected and indexed
    if self.experiment.o_peak is not None:
        if self.experiment.o_peak.peak_map_indexed_dict is not None:
            if y_type == 'transmission':
                _start_point = 1
                ax1.set_ylim(top=1.1, bottom=-0.01)
                _pos = 1.05
            else:
                _start_point = 0
                ax1.set_ylim(top=1.01, bottom=-0.1)
                _pos = -0.05
            _peak_map_indexed = self.experiment.o_peak.peak_map_indexed_dict['peak_map_indexed']
            _peak_map_full = self.experiment.o_peak.peak_map_full
            if index_level == 'iso':
                _peak_name_list = [_name for _name in _peak_map_indexed.keys() if '-' in _name]
            else:
                _peak_name_list = [_name for _name in _peak_map_indexed.keys() if '-' not in _name]
            if peak_id == 'all':
                _current_peak_map = _peak_map_full
            else:  # peak_id == 'indexed'
                _current_peak_map = _peak_map_indexed
            # BUG FIX: '_tag' used to be assigned only in the 'indexed'
            # branch (the 'all' assignment was commented out), so
            # peak_id='all' raised a NameError on first use below.
            _tag = 'ideal'
            for _peak_name in _peak_name_list:
                if len(_current_peak_map[_peak_name][_tag]) > 0:
                    _peak_x = _current_peak_map[_peak_name][_tag]['x']
                    _peak_y = _current_peak_map[_peak_name][_tag]['y']
                    if peak_exp == 'indexed':
                        _legend_name = '_nolegend_'
                    else:
                        _legend_name = _peak_name
                    _current_color = next(color_cycle)
                    _current_style = next(style_cycle)
                    # tick marks above/below the curve at each ideal peak
                    ax1.plot(_peak_x,
                             [_pos] * len(_peak_x),
                             '|', ms=10,
                             color=_current_color,
                             label=_legend_name)
                    if peak_height:
                        ax1.plot(_peak_x,
                                 _peak_y,
                                 '_',
                                 color=_current_color,
                                 label='_nolegend_')
                        ax1.vlines(_peak_x,
                                   _start_point,
                                   _peak_y,
                                   color=_current_color,
                                   alpha=1,
                                   label='_nolegend_')
                    if peak_exp == 'indexed':
                        _peak_x_exp = _peak_map_indexed[_peak_name]['exp']['x']
                        _peak_y_exp = _peak_map_indexed[_peak_name]['exp']['y']
                        ax1.scatter(_peak_x_exp,
                                    _peak_y_exp,
                                    marker=_current_style,
                                    color=_current_color,
                                    label=_peak_name)
                    if 'peak_span' in _peak_map_indexed[_peak_name].keys():
                        if len(_peak_map_indexed[_peak_name]['exp']) > 0:
                            _data_point_x = _peak_map_indexed[_peak_name]['peak_span']['x']
                            _data_point_y = _peak_map_indexed[_peak_name]['peak_span']['y']
                            ax1.scatter(_data_point_x,
                                        _data_point_y,
                                        label='_nolegend_')
    # Set plot limit and captions
    ax1 = fit_util.set_plt(ax1, fig_title=fig_title, grid=grid,
                           x_type=x_type, y_type=y_type, t_unit=t_unit,
                           logx=logx, logy=logy)
    # Plot table of the instrument parameters before/after calibration
    if table:
        columns_to_show = [r'$L$ (m)', r'$\Delta t$ ($\rm{\mu}$s)']
        rows = ['Before', 'After']
        _row_before = [self.init_source_to_detector_m, self.init_offset_us]
        _row_after = [self.calibrated_source_to_detector_m, self.calibrated_offset_us]
        # renamed from 'table' so the boolean parameter is not shadowed
        _table = ax1.table(rowLabels=rows, colLabels=columns_to_show,
                           cellText=[_row_before, _row_after],  # rows of data values
                           bbox=[0, -0.33, 1.0, 0.18]  # [left,bottom,width,height]
                           )
        _table.auto_set_font_size(False)
        _table.set_fontsize(10)
    plt.tight_layout()
    if save_fig:
        _sample_name = '_'.join(self.simulation.layer_list)
        _filename = 'calibration_' + _sample_name + '.png'
        plt.savefig(_filename, dpi=600, transparent=True)
        plt.close()
    return ax1
def export(self, x_type='energy', y_type='attenuation', t_unit='us',
           index_level='iso', peak_id='indexed',
           before=False, interp=False, mixed=True):
    """Assemble the calibrated curves into a DataFrame and copy it to the
    clipboard.

    Columns are added pairwise ('x_<label>', 'y_<label>') for the simulated
    signal (``mixed``), optionally the pre-calibration data (``before``),
    the interpolated or raw calibrated data (``interp``), and any
    detected/indexed peaks.

    :return: the assembled pandas DataFrame (also placed on the clipboard)
    """
    simu_label = 'ideal'
    exp_label = 'exp_raw'
    exp_before_label = 'exp_init'
    exp_interp_label = 'exp_interp'
    _df = pd.DataFrame()
    # column-name suffix encoding the x-axis type (e.g. energy vs. time)
    _col_suffix = fit_util.get_df_col_name(x_type=x_type)
    # Simulated total signal
    if mixed:
        _x = self.simulation.get_x(x_type=x_type,
                                   t_unit=t_unit,
                                   offset_us=self.calibrated_offset_us,
                                   source_to_detector_m=self.calibrated_source_to_detector_m,
                                   t_start_us=self.experiment.t_start_us,
                                   time_resolution_us=self.experiment.time_resolution_us)
        _y = self.simulation.get_y(y_type=y_type)
        _df['x_' + simu_label] = _x
        _df['y_' + simu_label] = _y
    """Plot options"""
    # Raw data before fitting (initial instrument parameters)
    if before:
        _x_init = self.experiment.get_x(x_type=x_type,
                                        t_unit=t_unit,
                                        offset_us=self.init_offset_us,
                                        source_to_detector_m=self.init_source_to_detector_m)
        # NOTE(review): here get_y() is passed baseline=self.baseline while
        # plot() calls get_y(y_type=y_type) only -- confirm whether the
        # baseline argument is intentional or a stale signature.
        _y_init = self.experiment.get_y(y_type=y_type,
                                        baseline=self.baseline)
        _df['x_' + exp_before_label] = _x_init
        _df['y_' + exp_before_label] = _y_init
    # 2.
    if interp:
        _exp_x_interp_calibrated, _exp_y_interp_calibrated = self.experiment.xy_scaled(
            x_type=x_type,
            y_type=y_type,
            energy_min=self.energy_min,
            energy_max=self.energy_max,
            energy_step=self.energy_step,
            t_unit=t_unit,
            offset_us=self.calibrated_offset_us,
            source_to_detector_m=self.calibrated_source_to_detector_m,
            baseline=self.baseline)
        # Interpolated raw data
        _df['x_' + exp_interp_label + _col_suffix] = _exp_x_interp_calibrated
        _df['y_' + exp_interp_label] = _exp_y_interp_calibrated
    else:
        # plot the calibrated raw data
        _x_cali = self.experiment.get_x(x_type=x_type,
                                        t_unit=t_unit,
                                        offset_us=self.calibrated_offset_us,
                                        source_to_detector_m=self.calibrated_source_to_detector_m)
        _y_cali = self.experiment.get_y(y_type=y_type,
                                        baseline=self.baseline)
        # wrap in Series so differing lengths pad with NaN instead of raising
        _df['x_' + exp_label + _col_suffix] = pd.Series(_x_cali)
        _df['y_' + exp_label] = pd.Series(_y_cali)
    # plot peaks detected and indexed
    # NOTE(review): this accesses 'o_peak.peak_map_indexed' and
    # 'o_peak.peak_df_scaled', while plot() reads
    # 'o_peak.peak_map_indexed_dict' and 'o_peak.peak_dict' -- one of the
    # two is probably a stale API; verify against the peak object.
    if self.experiment.o_peak and self.experiment.o_peak.peak_map_indexed is not None:
        _peak_df_scaled = self.experiment.o_peak.peak_df_scaled
        _peak_map_indexed = self.experiment.o_peak.peak_map_indexed
        _peak_map_full = self.experiment.o_peak.peak_map_full
        _x_peak_exp_all = fit_util.convert_exp_peak_df(x_type=x_type, peak_df=_peak_df_scaled, t_unit=t_unit)
        _y_peak_exp_all = fit_util.convert_attenuation_to(y_type=y_type, y=_peak_df_scaled['y'])
        # _df = pd.concat([_df, _peak_df_scaled], axis=1)
        _df['x_peak_exp_all'] = pd.Series(_x_peak_exp_all)
        _df['y_peak_exp_all'] = pd.Series(_y_peak_exp_all)
        # peak-map key for the requested x-axis type
        x_tag = fit_util.get_peak_tag(x_type=x_type)
        for _peak_name in _peak_map_indexed.keys():
            if len(_peak_map_full[_peak_name]['ideal']) > 0:
                _x_peak_ideal_all = _peak_map_full[_peak_name]['ideal'][x_tag]
                _y_peak_ideal_all = _peak_map_full[_peak_name]['ideal']['y']
                _df['x_peak_ideal_all(' + _peak_name + ')'] = _x_peak_ideal_all
                _df['y_peak_ideal_all(' + _peak_name + ')'] = _y_peak_ideal_all
            if len(_peak_map_indexed[_peak_name]['ideal']) > 0:
                _x_peak_ideal_indexed = _peak_map_indexed[_peak_name]['ideal'][x_tag]
                _y_peak_ideal_indexed = _peak_map_indexed[_peak_name]['ideal']['y']
                _x_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp'][x_tag]
                _y_peak_exp_indexed = _peak_map_indexed[_peak_name]['exp']['y']
                _df['x_peak_exp(' + _peak_name + ')'] = _x_peak_exp_indexed
                _df['y_peak_exp(' + _peak_name + ')'] = _y_peak_exp_indexed
                _df['x_peak_ideal(' + _peak_name + ')'] = _x_peak_ideal_indexed
                _df['y_peak_ideal(' + _peak_name + ')'] = _y_peak_ideal_indexed
    _df.to_clipboard(index=False)
    return _df
# def export_simu(self, filename=None, x_axis='energy', y_axis='attenuation',
# all_layers=False, all_elements=False, all_isotopes=False, items_to_export=None,
# t_start_us=1, time_resolution_us=0.16, time_unit='us'):
# if items_to_export is not None:
# # Shape items
# items = fit_util.Items(o_reso=self.simulation.o_reso, database=self.database)
# items_to_export = items.shaped(items_list=items_to_export)
#
# self.simulation._export(filename=filename,
# x_axis=x_axis,
# y_axis=y_axis,
# all_layers=all_layers,
# all_elements=all_elements,
# all_isotopes=all_isotopes,
# items_to_export=items_to_export,
# offset_us=self.calibrated_offset_us,
# source_to_detector_m=self.calibrated_source_to_detector_m,
# t_start_us=t_start_us,
# time_resolution_us=time_resolution_us,
# time_unit=time_unit)
| bsd-3-clause |
ephes/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
    # Python 2 built-in
    execfile
except NameError:
    # Python 3: re-create Python 2's execfile() on top of compile()/exec().
    def execfile(filename, global_vars=None, local_vars=None):
        with open(filename, encoding='utf-8') as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, global_vars, local_vars)
try:
    basestring
except NameError:
    # Python 3 has no 'basestring'; alias it to 'str' for isinstance checks.
    basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """Duplicate every write()/flush() onto two file-like objects."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # forward the payload to both sinks, in construction order
        for stream in (self.file1, self.file2):
            stream.write(data)

    def flush(self):
        for stream in (self.file1, self.file2):
            stream.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
# joblib memoization: cache fetched pages/files on disk under '_build' so
# repeated documentation builds do not re-download the same URLs.
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index
    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)
    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # scan forward, tracking nesting depth, until the opening tag closes
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        # contents between (not including) the outermost tag pair
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary; recurse into it
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            # advance past the value to the next key:value pair
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')
    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)
    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    # the filenames value is a flat JS array; slice it out and split
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx
    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """

    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # full_name -> resolved URL (or False) cache, filled by resolve()
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        # URL -> fetched HTML cache, filled by _get_link()
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)

    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                # nested index entry: take the first sub-entry's value
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    # anchor link to the object on the page
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False
        return link

    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found
        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobj['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).
        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any

    Returns (docstring, first_paragraph, row_after_docstring). When
    ignore_heading is True the second paragraph (truncated to 95 chars)
    is returned instead of the first, which is assumed to be a heading.
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    # skip a shebang line, remembering the offset for row accounting
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            # first STRING token at module level is the docstring
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
            break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Sphinx entry point: builds 'auto_examples/index.rst' plus one rst page
    per example, recursing one level into the examples directory.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    try:
        # plot_gallery may arrive as the string 'True'/'False' from conf.py
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)
    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
    <style type="text/css">
    div#sidebarbutton {
        /* hide the sidebar collapser, while ensuring vertical arrangement */
        display: none;
    }
    </style>
.. _examples-index:
Examples
========
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    # Extract the line count of a file: returns (row after the module
    # docstring, last row), both adjusted for a possible shebang line.
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            # first non-trivial STRING token is the module docstring
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
    """Sort the '.py' entries of file_list by the line count of their code
    body (total lines minus docstring lines), ties broken by file name.

    :param file_list: directory listing (mixed file names)
    :param target_dir: directory containing the files
    :return: sorted list of the '.py' file names
    """
    new_list = [x for x in file_list if x.endswith('.py')]
    # early exit before touching numpy on an empty listing
    if not new_list:
        return []
    unsorted = np.zeros(shape=(len(new_list), 2))
    # FIX: np.object / np.str / np.float were deprecated aliases of the
    # builtins and removed in NumPy >= 1.24; use the builtins directly.
    unsorted = unsorted.astype(object)
    for count, exmpl in enumerate(new_list):
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        unsorted[count][1] = total_lines - docstr_lines
        unsorted[count][0] = exmpl
    # primary key: body line count; secondary key: file name
    index = np.lexsort((unsorted[:, 0].astype(str),
                        unsorted[:, 1].astype(float)))
    return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
    """Generates RST to place a thumbnail in a gallery"""
    # thumbnail png generated next to the example's rendered page
    thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
    # flatten paths into single identifiers usable as RST link targets
    link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
    if ref_name.startswith('._'):
        ref_name = ref_name[2:]
    out = []
    out.append("""
.. raw:: html
    <div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
    out.append('.. figure:: %s\n' % thumb)
    if link_name.startswith('._'):
        link_name = link_name[2:]
    if full_dir != '.':
        out.append('   :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
    else:
        out.append('   :target: ./%s.html\n\n' % link_name[:-3])
    out.append("""   :ref:`example_%s`
.. raw:: html
    </div>
""" % (ref_name))
    return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Writes the directory's README, a thumbnail + hidden toctree entry per
    example into fhindex, and appends per-object back-reference galleries
    under modules/generated/.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # examples are listed shortest-code-first
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
   :hidden:
   %s/%s
""" % (directory, fname[:-3]))
            # append this example's thumbnail to the gallery of every
            # sklearn object it uses
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                    print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
                seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
    <div class="clearer"></div>
""")  # clear at the end of the section
# modules for which we embed links into example code: any documented object
# from these packages found in an example gets hyperlinked to its API page.
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height.

    :param in_fname: source image path
    :param out_fname: destination path for the padded thumbnail
    :param width: output canvas width in pixels
    :param height: output canvas height in pixels
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    # pick the scale that makes the image fit inside the canvas
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # resize the image
    # FIX: Image.ANTIALIAS was renamed LANCZOS (Pillow >= 2.7) and removed
    # in Pillow 10; prefer LANCZOS and fall back for very old PIL builds.
    resample = getattr(Image, 'LANCZOS', getattr(Image, 'ANTIALIAS', None))
    img.thumbnail((width_sc, height_sc), resample)
    # insert centered on a white canvas
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """Return the shortest module path from which obj_name is importable."""
    parts = module_name.split('.')
    short_name = module_name
    # Walk from the longest proper prefix down toward the top-level package,
    # keeping the shortest prefix that can still import ``obj_name``.
    for depth in reversed(range(1, len(parts))):
        candidate = '.'.join(parts[:depth])
        try:
            exec('from %s import %s' % (candidate, obj_name))
        except ImportError:
            # this prefix no longer exposes the object: keep the previous
            # (one segment longer) working prefix and stop
            short_name = '.'.join(parts[:depth + 1])
            break
        short_name = candidate
    return short_name
class NameFinder(ast.NodeVisitor):
    """Finds the longest form of variable names and their imports in code.

    Records only names whose leading segment was brought in by an import
    statement; get_mapping() yields (written name, fully-qualified name).
    """

    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}
        self.accessed_names = set()

    def visit_Import(self, node, prefix=''):
        # map each local alias to the real (possibly prefixed) module path
        for alias in node.names:
            self.imported_names[alias.asname or alias.name] = prefix + alias.name

    def visit_ImportFrom(self, node):
        # 'from a.b import c' behaves like 'import a.b.c' bound to 'c'
        self.visit_Import(node, node.module + '.')

    def visit_Name(self, node):
        self.accessed_names.add(node.id)

    def visit_Attribute(self, node):
        # collect the dotted chain a.b.c from the nested Attribute nodes
        chain = []
        inner = node
        while isinstance(inner, ast.Attribute):
            chain.append(inner.attr)
            inner = inner.value
        if isinstance(inner, ast.Name):
            # plain dotted access (a.b.c, not e.g. a().b)
            chain.append(inner.id)
            self.accessed_names.add('.'.join(reversed(chain)))
        else:
            # the base is an expression (call, subscript, ...): descend
            self.visit(inner)

    def get_mapping(self):
        for name in self.accessed_names:
            head = name.split('.', 1)[0]
            tail = name[len(head):]
            if head in self.imported_names:
                # splice the resolved import path onto the attribute tail
                yield name, self.imported_names[head] + tail
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names
    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        # get shortened module name
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    If ``plot_gallery`` is True and ``fname`` starts with 'plot', the
    example script is executed (unless its first image is already newer
    than the source) and its figures, captured stdout and timing are
    saved alongside the generated rst.

    Returns the set of sklearn functions/classes imported in the example.
    """
    # Derive names for all generated artifacts from the script name.
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        # Reuse previously captured stdout/timing when available so that
        # skipped (up-to-date) examples still render their output.
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                # Tee stdout into a buffer so the script's output can be
                # embedded in the generated rst while still being shown live.
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                # Persist the captured output and timing for later builds.
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    # Only forward face/edge colors that differ from the
                    # rcParams defaults, so savefig keeps its own defaults.
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr
                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                # A broken example must not abort the whole doc build:
                # report the traceback and carry on with no figures.
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # Figures are up to date: collect the existing ones from disk.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
            figure_list.sort()
        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)
    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    # The rst template interpolates the local variables defined above
    # (docstring, image_list, stdout, time_m, time_s, short_fname, ...).
    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()
    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
    # Backreferences: which sklearn objects this example demonstrates.
    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code

    Connected to the Sphinx 'build-finished' event: rewrites generated
    HTML example pages so that the names recorded in each example's
    ``*_codeobj.pickle`` file link to their API documentation.
    """
    if exception is not None:
        # The build failed; nothing consistent to post-process.
        return
    print('Embedding documentation hyperlinks in examples..')
    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)
    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            # Link embedding is best-effort: warn and keep building
            # without links for this package when offline.
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)
    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))
    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'
    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue
                    if link is not None:
                        # Rebuild the name as Pygments renders it: each
                        # dotted part in a 'n' span, dots as 'o' spans.
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                # ensure greediness: longest names first, so e.g.
                # 'np.linalg.norm' is matched before 'np.linalg'.
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))

                def substitute_link(match):
                    return str_repl[match.group()]

                if len(str_repl) > 0:
                    # Round-trip as bytes, decoding per line, to leave the
                    # file's utf-8 encoding untouched.
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Sphinx extension entry point: register the gallery hooks."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')

    # Hyperlink embedding can only run once the HTML output exists.
    app.connect('build-finished', embed_code_links)

    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made. If the desired image name already
    # exists, it appends a digit to prevent overwrites, and the directory
    # is never cleared, so the number of images grows with every build.
    # (Asked on the sphinx development list with no response:
    # http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html)
    #
    # Work around it by clearing the image build directory before each
    # build. If sphinx changes their layout between versions this will
    # stop working (though it should not crash). Tested on Sphinx 1.0.7.
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        for entry in os.listdir(build_image_dir):
            if entry.endswith('png'):
                os.remove(os.path.join(build_image_dir, entry))
def setup_module():
    # HACK: nosetests would otherwise treat the Sphinx ``setup()`` above as
    # a test fixture; defining setup_module() stops it from being run.
    pass
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.