repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
nkmk/python-snippets | notebook/pandas_ohlc_candlestick_chart.py | 1 | 5049 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import mpl_finance

# Load AAPL daily OHLCV data and keep only calendar year 2017
# (year-string slicing of a DatetimeIndex; `pd` is imported above).
df_org = pd.read_csv('data/src/aapl_2015_2019.csv', index_col=0, parse_dates=True)['2017']
print(df_org)
# open high low close volume
# 2017-01-03 115.80 116.3300 114.760 116.15 28781865
# 2017-01-04 115.85 116.5100 115.750 116.02 21118116
# 2017-01-05 115.92 116.8642 115.810 116.61 22193587
# 2017-01-06 116.78 118.1600 116.470 117.91 31751900
# 2017-01-09 117.95 119.4300 117.940 118.99 33561948
# ... ... ... ... ... ...
# 2017-12-22 174.68 175.4240 174.500 175.01 16052615
# 2017-12-26 170.80 171.4700 169.679 170.57 32968167
# 2017-12-27 170.10 170.7800 169.710 170.60 21672062
# 2017-12-28 171.00 171.8500 170.480 171.08 15997739
# 2017-12-29 170.52 170.5900 169.220 169.23 25643711
#
# [251 rows x 5 columns]

# candlestick_ohlc() needs a 2-D array of rows
# [matplotlib-date-number, open, high, low, close, ...], so convert the
# DatetimeIndex to matplotlib date numbers and flatten to an ndarray.
df = df_org.copy()
df.index = mdates.date2num(df.index)
data = df.reset_index().values
print(type(data))
# <class 'numpy.ndarray'>
print(data.shape)
# (251, 6)
print(data)
# [[7.3633200e+05 1.1580000e+02 1.1633000e+02 1.1476000e+02 1.1615000e+02
# 2.8781865e+07]
# [7.3633300e+05 1.1585000e+02 1.1651000e+02 1.1575000e+02 1.1602000e+02
# 2.1118116e+07]
# [7.3633400e+05 1.1592000e+02 1.1686420e+02 1.1581000e+02 1.1661000e+02
# 2.2193587e+07]
# ...
# [7.3669000e+05 1.7010000e+02 1.7078000e+02 1.6971000e+02 1.7060000e+02
# 2.1672062e+07]
# [7.3669100e+05 1.7100000e+02 1.7185000e+02 1.7048000e+02 1.7108000e+02
# 1.5997739e+07]
# [7.3669200e+05 1.7052000e+02 1.7059000e+02 1.6922000e+02 1.6923000e+02
# 2.5643711e+07]]

# Daily candlestick chart with automatically chosen date ticks.
# colorup='r' / colordown='b' follows the Japanese chart convention
# (red = up, blue = down).
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
mpl_finance.candlestick_ohlc(ax, data, width=2, alpha=0.5, colorup='r', colordown='b')
ax.grid()
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.savefig('data/dst/candlestick_day.png')
plt.close()
# 

# Same daily chart, but with explicit monthly ticks and YYYY/MM labels.
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
mpl_finance.candlestick_ohlc(ax, data, width=2, alpha=0.5, colorup='r', colordown='b')
ax.grid()
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m'))
plt.savefig('data/dst/candlestick_day_format.png')
plt.close()
# 

# Resample daily bars into weekly OHLCV bars (weeks starting Monday,
# labelled by the left edge): first open, max high, min low, last close,
# summed volume.
d_ohlcv = {'open': 'first',
           'high': 'max',
           'low': 'min',
           'close': 'last',
           'volume': 'sum'}
df_w = df_org.resample('W-MON', closed='left', label='left').agg(d_ohlcv)
df_w.index = mdates.date2num(df_w.index)
data_w = df_w.reset_index().values

# Weekly candlestick chart.
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
mpl_finance.candlestick_ohlc(ax, data_w, width=4, alpha=0.75, colorup='r', colordown='b')
ax.grid()
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.savefig('data/dst/candlestick_week.png')
plt.close()
# 

# Weekly candlesticks plus 4/13/26-week simple moving averages of the close.
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
mpl_finance.candlestick_ohlc(ax, data_w, width=4, alpha=0.75, colorup='r', colordown='b')
ax.plot(df_w.index, df_w['close'].rolling(4).mean())
ax.plot(df_w.index, df_w['close'].rolling(13).mean())
ax.plot(df_w.index, df_w['close'].rolling(26).mean())
ax.grid()
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.savefig('data/dst/candlestick_week_sma.png')
plt.close()
# 

# Two stacked panels sharing the x-axis: candlesticks (3/4 of the height)
# over a volume bar chart (1/4).
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 4), sharex=True,
                         gridspec_kw={'height_ratios': [3, 1]})
mpl_finance.candlestick_ohlc(axes[0], data_w, width=4, alpha=0.75, colorup='r', colordown='b')
axes[1].bar(df_w.index, df_w['volume'], width=4, color='navy')
axes[0].grid()
axes[1].grid()
locator = mdates.AutoDateLocator()
axes[0].xaxis.set_major_locator(locator)
axes[0].xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.savefig('data/dst/candlestick_week_v.png')
plt.close()
# 

# Combined chart: candlesticks + moving averages in the top panel,
# volume bars in the bottom panel.
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 4), sharex=True,
                         gridspec_kw={'height_ratios': [3, 1]})
mpl_finance.candlestick_ohlc(axes[0], data_w, width=4, alpha=0.75, colorup='r', colordown='b')
axes[0].plot(df_w.index, df_w['close'].rolling(4).mean())
axes[0].plot(df_w.index, df_w['close'].rolling(13).mean())
axes[0].plot(df_w.index, df_w['close'].rolling(26).mean())
axes[1].bar(df_w.index, df_w['volume'], width=4, color='navy')
axes[0].grid()
axes[1].grid()
locator = mdates.AutoDateLocator()
axes[0].xaxis.set_major_locator(locator)
axes[0].xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
plt.savefig('data/dst/candlestick_week_sma_v.png')
plt.close()
# 
| mit |
rsivapr/scikit-learn | sklearn/tree/tests/test_export.py | 3 | 2897 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
# Two linearly separable clusters: negative points around (-1, -1),
# positive points around (1, 1).
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# Held-out points and the labels a correct classifier should assign them.
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
    """Check correctness of export_graphviz"""
    # A depth-3 tree on the toy data splits perfectly at the root,
    # so the exported graph has exactly one decision node and two leaves.
    clf = DecisionTreeClassifier(max_depth=3,
                                 min_samples_split=1,
                                 criterion="gini",
                                 random_state=1)
    clf.fit(X, y)

    # Test export code
    # The expected strings are compared byte-for-byte against the dot
    # output, so they are tied to this exact sklearn formatting.
    out = StringIO()
    export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    contents2 = "digraph Tree {\n" \
                "0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
                "samples = 6\", shape=\"box\"] ;\n" \
                "1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
                "value = [ 3. 0.]\", shape=\"box\"] ;\n" \
                "0 -> 1 ;\n" \
                "2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
                "value = [ 0. 3.]\", shape=\"box\"] ;\n" \
                "0 -> 2 ;\n" \
                "}"
    assert_equal(contents1, contents2)

    # Test with feature_names
    # Same graph, but the root label uses the supplied feature name.
    out = StringIO()
    export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
    contents1 = out.getvalue()
    contents2 = "digraph Tree {\n" \
                "0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
                "samples = 6\", shape=\"box\"] ;\n" \
                "1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
                "value = [ 3. 0.]\", shape=\"box\"] ;\n" \
                "0 -> 1 ;\n" \
                "2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
                "value = [ 0. 3.]\", shape=\"box\"] ;\n" \
                "0 -> 2 ;\n" \
                "}"
    assert_equal(contents1, contents2)

    # Test max_depth
    # With max_depth=0 only the root is expanded; children are elided
    # as "(...)" placeholder nodes.
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0)
    contents1 = out.getvalue()
    contents2 = "digraph Tree {\n" \
                "0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
                "samples = 6\", shape=\"box\"] ;\n" \
                "1 [label=\"(...)\", shape=\"box\"] ;\n" \
                "0 -> 1 ;\n" \
                "2 [label=\"(...)\", shape=\"box\"] ;\n" \
                "0 -> 2 ;\n" \
                "}"
    assert_equal(contents1, contents2)
def test_graphviz_errors():
    """Check for errors of export_graphviz.

    An empty ``feature_names`` list cannot be indexed by the tree's
    feature ids, so export must raise ``IndexError``.
    """
    estimator = DecisionTreeClassifier(min_samples_split=1, max_depth=3)
    estimator.fit(X, y)

    sio = StringIO()
    assert_raises(IndexError, export_graphviz, estimator, sio, feature_names=[])
# Allow running this test module directly via the nose test runner.
if __name__ == "__main__":
    import nose
    nose.runmodule()
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
# Shared fixture for both tests below.
iris = load_iris()


def test_transform_linear_model():
    # For every sparse/dense input and several threshold specs, an
    # L1-penalized fit followed by `clf.transform` (the old, pre-
    # SelectFromModel feature-selection API) must drop features, and a
    # subsequent L2 refit on the reduced data must still score > 0.7.
    for clf in (LogisticRegression(C=0.1),
                LinearSVC(C=0.01, dual=False),
                SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
                              random_state=0)):
        # thresh=None uses the estimator's default; string forms are
        # parsed expressions like ".09*mean" of the coefficients.
        for thresh in (None, ".09*mean", "1e-5 * median"):
            for func in (np.array, sp.csr_matrix):
                X = func(iris.data)
                clf.set_params(penalty="l1")
                clf.fit(X, iris.target)
                X_new = clf.transform(X, thresh)
                if isinstance(clf, SGDClassifier):
                    # SGD is stochastic: only require no feature growth.
                    assert_true(X_new.shape[1] <= X.shape[1])
                else:
                    assert_less(X_new.shape[1], X.shape[1])
                clf.set_params(penalty="l2")
                clf.fit(X_new, iris.target)
                pred = clf.predict(X_new)
                assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
    """Unparseable threshold strings passed to `transform` raise ValueError."""
    estimator = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
    estimator.fit(iris.data, iris.target)

    for bad_threshold in ("gobbledigook", ".5 * gobbledigook"):
        assert_raises(ValueError, estimator.transform, iris.data, bad_threshold)
| bsd-3-clause |
posborne/Anvil | anvil/examples/commit_histogram.py | 2 | 1787 | # Copyright (c) 2012 Paul Osborne <osbpau@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from anvil import Anvil
from anvil.examples.helpers import find_changesets_for_authors
from matplotlib import pyplot as pp
import datetime
import times
# Earliest date considered by this example (appears unused in this file —
# presumably consumed by helpers; verify before removing).
START_DT = datetime.datetime(2000, 1, 1)


def main():
    # NOTE: Python 2 code (print statement, dict .values() indexing).
    # Plot a 24-bin histogram of the local hour-of-day of one author's
    # commits in the "spectrum" repository.
    anvil = Anvil("spectrum")
    anvil.create_session_by_prompting()  # interactive login
    print "collecting related changesets"
    changesets = find_changesets_for_authors(
        anvil, ['Paul Osborne', ], datetime.datetime(2009, 1, 1)).values()[0]
    # Convert each commit timestamp to US/Central and keep just the hour.
    commit_hour = [times.to_local(c.date_time, 'US/Central').hour
                   for c in changesets]
    pp.hist(commit_hour, 24)
    a = pp.gca()
    a.set_xlim([0, 23])
    pp.show()


if __name__ == '__main__':
    main()
| mit |
natasasdj/OpenWPM | analysis_parallel/08_third-party_images.py | 1 | 2769 | import sqlite3
import os
import pandas as pd

# on how many pages appear third-party/one-pixel/zero-size images
# on how many homesites appear third party/one-pixel/zero-size images
# on how many first links appear third-party/one-pixel/zero-size images
# on how many domains appear third party/one-pixel/zero-size images

################ df3 - third party images
# Load the full third-party-image table into a DataFrame.
main_dir = '/root/OpenWPM/analysis_parallel/'
res_dir = os.path.join(main_dir, 'results')
db = os.path.join(res_dir, 'images3.sqlite')
conn3 = sqlite3.connect(db)
query = 'SELECT * FROM Images3'
df3 = pd.read_sql_query(query, conn3)
conn3.close()

# A "page" is a unique (site_id, link_id) pair; link_id == 0 marks the
# home page ("homesite"), link_id != 0 a first-level link.
#find how many unique site_id, link_id are there
#find how many unique site_id where link_id=0
#find how many unique site_id, link_id where link_id!=0
#find how many unique site_ids are
pages = df3[['site_id', 'link_id']].drop_duplicates()
no_pages = pages.shape[0]
total_pages = 4347837  # 00_statistics.py
float(no_pages) / total_pages
#0.8963063702710106
homesites = pages[pages['link_id'] == 0]
no_homesites = homesites.shape[0]
total_homesites = 34716
float(no_homesites) / total_homesites
#0.8837135614702155
firstLinks = pages[pages['link_id'] != 0]
no_firstLinks = firstLinks.shape[0]
total_firstLinks = 4313319
float(no_firstLinks) / total_firstLinks
# 0.896366579888944
domains = pages['site_id'].drop_duplicates()
no_domains = domains.shape[0]
float(no_domains) / total_homesites
# 0.9049717709413527

##### ##### ###### ##### one-pixel images
# Same four ratios, restricted to 1x1 tracking pixels.
pages = df3[df3['pixels'] == 1][['site_id', 'link_id']].drop_duplicates()
no_pages = pages.shape[0]
total_pages = 4347837  # 00_statistics.py
float(no_pages) / total_pages
# 0.8333189583694145
homesites = pages[pages['link_id'] == 0]
no_homesites = homesites.shape[0]
total_homesites = 34716
float(no_homesites) / total_homesites
# 0.8169720013826478
firstLinks = pages[pages['link_id'] != 0]
no_firstLinks = firstLinks.shape[0]
total_firstLinks = 4313319
float(no_firstLinks) / total_firstLinks
# 0.8334122748630463
domains = pages['site_id'].drop_duplicates()
no_domains = domains.shape[0]
float(no_domains) / total_homesites
# 0.8664304643392096

##### ##### ##### ##### zero-size images
# Same four ratios, restricted to zero-byte images.
pages = df3[df3['size'] == 0][['site_id', 'link_id']].drop_duplicates()
no_pages = pages.shape[0]
total_pages = 4347837  # 00_statistics.py
float(no_pages) / total_pages
# 0.32237892082890873
homesites = pages[pages['link_id'] == 0]
no_homesites = homesites.shape[0]
total_homesites = 34716
float(no_homesites) / total_homesites
# 0.2408975688443369
firstLinks = pages[pages['link_id'] != 0]
no_firstLinks = firstLinks.shape[0]
total_firstLinks = 4313319
float(no_firstLinks) / total_firstLinks
# 0.32301992966437215
domains = pages['site_id'].drop_duplicates()
no_domains = domains.shape[0]
float(no_domains) / total_homesites
# 0.6728309713100588
| gpl-3.0 |
tcrossland/time_series_prediction | ann/scenario.py | 1 | 2422 | import time
import matplotlib.pyplot as plt
import numpy as np
class Config:
    """Hyper-parameter bundle describing one training scenario."""

    def __init__(self, time_series, look_back=6, batch_size=1, topology=None, validation_split=0.3,
                 include_index=False, activation='tanh', optimizer='adam'):
        self.time_series = time_series
        self.look_back = look_back
        self.batch_size = batch_size
        # Build a fresh default list per instance rather than sharing a
        # mutable default argument between instances.
        self.topology = [5] if topology is None else topology
        self.validation_split = validation_split
        self.activation = activation
        self.include_index = include_index
        self.optimizer = optimizer

    def __str__(self):
        # Short tag (window size / batch size) used in output file names.
        return "w{}-b{}".format(self.look_back, self.batch_size)
class Scenario:
    """Runs one model/config combination: train, evaluate and plot."""

    def __init__(self, model, config):
        self.model = model
        self.time_series = config.time_series
        # Convenience alias; assumed to be a 2-D column array, since the
        # plot helpers index it as plot[:, :] — TODO confirm with caller.
        self.dataset = config.time_series.dataset
        self.epochs = 0  # cumulative epochs trained so far
        self.config = config

    def execute(self, epochs):
        """Train the model for `epochs` more epochs and return its evaluation."""
        print()
        print()
        self.epochs = self.epochs + epochs
        print(">>>> {} + {} (epochs={}, topology={})".format(self.model, self.time_series, self.epochs,
                                                             self.config.topology))
        self.model.summary()
        # NOTE(review): time.clock() was removed in Python 3.8; this code
        # targets older interpreters (time.perf_counter() is the modern form).
        start = time.clock()
        self.model.train(epochs=epochs, batch_size=self.config.batch_size)
        self.training_time = time.clock() - start
        print("Training time: %.3f" % self.training_time)
        prediction = self.model.evaluate()
        # self.plot(predictions)
        return prediction

    def create_empty_plot(self):
        """Return an all-NaN array shaped like the dataset (gaps stay blank)."""
        plot = np.empty_like(self.dataset)
        plot[:, :] = np.nan
        return plot

    def create_left_plot(self, data):
        """Place `data` at the start of the series, shifted by the look-back window."""
        offset = self.config.look_back
        plot = self.create_empty_plot()
        plot[offset:len(data) + offset, :] = data
        return plot

    def create_right_plot(self, data):
        """Place `data` flush against the end of the series."""
        plot = self.create_empty_plot()
        plot[-len(data):, :] = data
        return plot

    def plot(self, predictions):
        """Plot the dataset with each prediction overlaid and save to PNG."""
        plt.figure(figsize=(16, 12))
        # Zoom to the 60%-80% window of the series, where the test
        # predictions land.
        plt.xlim(self.dataset.size * 0.6, self.dataset.size * 0.8)
        plt.plot(self.dataset)
        for pred in predictions:
            plt.plot(self.create_right_plot(pred))
        filename = "out/{}/{}-{}.png".format(self.time_series, self.model, self.config)
        plt.savefig(filename)
        plt.close()
| mit |
manjunaths/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 18 | 6444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
# pandas is an optional dependency; callers can check HAS_PANDAS before
# using any of the extraction helpers below.
try:
    # pylint: disable=g-import-not-at-top
    import pandas as pd
    HAS_PANDAS = True
except ImportError:
    HAS_PANDAS = False

# dtype names accepted for extraction. Only the KEYS are consulted in this
# file (membership tests); the values ('int'/'float'/'i') are not read here.
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}
def extract_pandas_data(data):
  """Extract data from pandas.DataFrame for predictors.

  Given a DataFrame, will extract the values and cast them to float. The
  DataFrame is expected to contain values of type int, float or bool.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values as floats.

  Raises:
    ValueError: if data contains types other than int, float or bool.
  """
  # Anything that is not a DataFrame passes through untouched.
  if not isinstance(data, pd.DataFrame):
    return data

  unsupported = [column for column in data
                 if data[column].dtype.name not in PANDAS_DTYPES]
  if unsupported:
    details = ', '.join("'%s' type='%s'" % (column, data[column].dtype.name)
                        for column in unsupported)
    raise ValueError('Data types for extracting pandas data must be int, '
                     'float, or bool. Found: ' + details)
  return data.values.astype('float')
def extract_pandas_matrix(data):
  """Extracts numpy matrix from pandas DataFrame.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values. Non-DataFrame inputs are
    returned unchanged.
  """
  if not isinstance(data, pd.DataFrame):
    return data

  # `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in
  # pandas 1.0; `.values` is the long-standing equivalent accessor and
  # works on every pandas version this code could run against.
  return data.values
def extract_pandas_labels(labels):
  """Extract data from pandas.DataFrame for labels.

  Args:
    labels: `pandas.DataFrame` or `pandas.Series` containing one column of
      labels to be extracted.

  Returns:
    A numpy `ndarray` of labels from the DataFrame.

  Raises:
    ValueError: if more than one column is found or type is not int, float or
      bool.
  """
  # Non-DataFrame inputs (including pd.Series) pass through untouched.
  if not isinstance(labels, pd.DataFrame):
    return labels

  if len(labels.columns) > 1:
    raise ValueError('Only one column for labels is allowed.')

  unsupported = [column for column in labels
                 if labels[column].dtype.name not in PANDAS_DTYPES]
  if unsupported:
    report = ["'" + str(column) + "' type="
              + str(labels[column].dtype.name) for column in unsupported]
    raise ValueError('Data types for extracting labels must be int, '
                     'float, or bool. Found: ' + ', '.join(report))
  return labels.values
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=True,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Returns input function that would feed Pandas DataFrame into the model.

  Note: `y`'s index must match `x`'s index.

  Args:
    x: pandas `DataFrame` object.
    y: pandas `Series` object.
    batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If not `None`,
      read attempts that would exceed this value will raise `OutOfRangeError`.
    shuffle: bool, whether to read the records in random order.
    queue_capacity: int, size of the read queue. If `None`, it will be set
      roughly to the size of `x`.
    num_threads: int, number of threads used for reading and enqueueing.
    target_column: str, name to give the target column `y`.

  Returns:
    Function, that has signature of ()->(dict of `features`, `target`)

  Raises:
    ValueError: if `x` already contains a column with the same name as `y`, or
      if the indexes of `x` and `y` don't match.
  """
  # Copy so that appending the target column does not mutate the caller's
  # DataFrame.
  x = x.copy()
  if y is not None:
    if target_column in x:
      raise ValueError(
          'Cannot use name %s for target column: DataFrame already has a '
          'column with that name: %s' % (target_column, x.columns))
    if not np.array_equal(x.index, y.index):
      raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
                       'Index for y: %s\n' % (x.index, y.index))
    # Feed features and target through one queue as a single frame.
    x[target_column] = y

  # TODO(mdan): These are memory copies. We probably don't need 4x slack space.
  # The sizes below are consistent with what I've seen elsewhere.
  if queue_capacity is None:
    if shuffle:
      queue_capacity = 4 * len(x)
    else:
      queue_capacity = len(x)
  min_after_dequeue = max(queue_capacity / 4, 1)

  def input_fn():
    """Pandas input function."""
    queue = feeding_functions.enqueue_data(
        x,
        queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        enqueue_size=batch_size,
        num_epochs=num_epochs)
    # dequeue_up_to is only supported with a finite number of epochs.
    if num_epochs is None:
      features = queue.dequeue_many(batch_size)
    else:
      features = queue.dequeue_up_to(batch_size)
    assert len(features) == len(x.columns) + 1, ('Features should have one '
                                                 'extra element for the index.')
    # Drop the index tensor that enqueue_data prepends.
    features = features[1:]
    features = dict(zip(list(x.columns), features))
    if y is not None:
      # Split the target back out of the combined frame.
      target = features.pop(target_column)
      return features, target
    return features
  return input_fn
| apache-2.0 |
Sibada/VIC_Hime | Hime/calibrater.py | 1 | 13255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pandas as pd
from Hime import log
from Hime.uh_creater import load_rout_data
from Hime.routing import confluence, gather_to_month, gather_to_year
from Hime.statistic import nmse, bias
from Hime.utils import set_nc_value
from Hime.vic_execer import vic_exec
########################################################################################################################
# calibrate configures should look like this.
# calib_configs = {
# "driver_path": "file_path",
# "global_file": "file_path",
# "params_file": "file_path",
# "mpi": False,
# "ncores": 4,
#
# "start_date": [1960, 1, 1],
# "end_date": [1970, 1, 1],
# "calib_start_date": [1961, 1, 1],
#
# "rout_data_file": "file_path",
# "domain_file": "file_path",
# "vic_out_file": "file_path",
# "time_scale": "D",
# "obs_data_file": "file_path",
# "obs_start_date": [1960, 1, 1],
#
# "calib_range" = NONE / "file_path"
# "BPC": 0.5,
# "only_bias": False,
# "turns": 2,
# "max_itr": 20,
# "toler": 0.005,
#
# "rout_data": None,
# "obs_data": None,
# "p_init": [[0.05, 0.25, 0.45],[...],[...]...]
# }
########################################################################################################################
########################################################################################################################
#
# Run vic and routing and return a accuracy index of the simulation.
#
########################################################################################################################
def vic_try(calib_configs):
    """Run VIC once with the current parameter file and score the simulation.

    Returns a tuple (e, NMSE, BIAS) where `e` is the objective to minimize:
    BIAS alone when only_bias is set, otherwise |BIAS| * BPC + NMSE.
    Raises ValueError if the VIC executable exits with a non-zero status.
    """
    time_scale = calib_configs["time_scale"]
    global_file = calib_configs["global_file"]
    domain_file = calib_configs["domain_file"]
    vic_out_file = calib_configs["vic_out_file"]
    # Dates are stored as [year, month, day] triples in the config.
    ymd = calib_configs["obs_start_date"]
    obs_start_date = datetime(ymd[0], ymd[1], ymd[2])
    ymd = calib_configs["start_date"]
    start_date = datetime(ymd[0], ymd[1], ymd[2])
    ymd = calib_configs["calib_start_date"]
    calib_start_date = datetime(ymd[0], ymd[1], ymd[2])
    ymd = calib_configs["end_date"]
    end_date = datetime(ymd[0], ymd[1], ymd[2])
    driver = calib_configs["driver_path"]
    mpi = calib_configs["mpi"]
    ncores = calib_configs["ncores"]
    BPC = calib_configs["BPC"]
    only_bias = calib_configs["only_bias"]
    rout_data = calib_configs["rout_data"]
    obs_data = calib_configs["obs_data"]

    # Load observation runoff data
    obs = obs_data
    # Multi-column observation files keep the runoff in the last column.
    if len(obs.shape) > 1:
        obs = obs[:, -1]
    ts_obs = pd.date_range(obs_start_date, periods=len(obs), freq=time_scale)
    obs = pd.Series(obs, ts_obs)
    # Score only the calibration window (warm-up period excluded).
    obs = obs[calib_start_date: end_date]

    # Run VIC
    status_code, logs, logs_err = vic_exec(driver, global_file, mpi=mpi, n_cores=ncores)
    if status_code != 0:
        log.info(logs_err)
        raise ValueError("VIC run fail. Return %d" % status_code)

    # Confluence.
    # Route the gridded VIC output to the basin outlet time series.
    sim = confluence(vic_out_file, rout_data, domain_file, start_date, end_date)
    sim = sim[calib_start_date: end_date]
    # Aggregate daily simulation to the observation's time scale.
    if time_scale == "M":
        sim = gather_to_month(sim)
    if time_scale == "A":
        sim = gather_to_year(sim)

    # Calculate statistic indexes.
    NMSE = nmse(obs, sim)
    BIAS = bias(obs, sim)
    if only_bias:
        e = BIAS
    else:
        e = np.abs(BIAS) * BPC + NMSE
    return e, NMSE, BIAS
def set_params(params_file, calib_range, var_id, value):
    """Write one calibratable VIC parameter into the params netCDF file.

    Args:
        params_file: path of the VIC parameters netCDF file.
        calib_range: cell-index mask selecting which grid cells to modify.
        var_id: which parameter to set, 0..5 (see the table below; ids 4/5
            are the second/third soil-layer depths, matching the "d2"/"d3"
            names used by calibrate()).
        value: the value to write.

    Raises:
        ValueError: if var_id is not in 0..5 (the original if-chain silently
            did nothing, which would hide a programming error).
    """
    # var_id -> (netCDF variable name, extra keyword arguments).
    settings = {
        0: ("infilt", {}),
        1: ("Ds", {}),
        2: ("Dsmax", {}),
        3: ("Ws", {}),
        4: ("depth", {"dim": 1}),
        5: ("depth", {"dim": 2}),
    }
    try:
        var_name, extra = settings[var_id]
    except KeyError:
        raise ValueError("Unknown var_id: %r (expected 0..5)" % (var_id,))
    set_nc_value(params_file, var_name, value, mask=calib_range, **extra)
def vic_try_with_param(calib_configs, var_id, value):
    """Set one parameter to `value`, run VIC once, and return [e, NMSE, BIAS]."""
    set_params(calib_configs["params_file"], calib_configs["calib_range"],
               var_id, value)
    result = vic_try(calib_configs)
    e, NMSE, BIAS = result
    log.debug("VIC runs result value: %.3f E: %.3f NMSE: %.3f BIAS: %.3f" % (value, e, NMSE, BIAS))
    return list(result)
########################################################################################################################
#
# Auto-calibrate VIC model
#
########################################################################################################################
def calibrate(proj, calib_configs):
    """Auto-calibrate the six VIC soil parameters by iterative 1-D search.

    For each of several `turns`, each parameter in turn is optimized with a
    three-point bracketing search (golden-section-like, factor 0.618) while
    the other five are held at their current best values. The optimized
    values are written back into the params netCDF file and returned.
    """
    params_file = calib_configs["params_file"]
    domain_file = calib_configs["domain_file"]
    start_date = calib_configs["start_date"]
    end_date = calib_configs["end_date"]
    # p_rngs[p] = [left, middle, right] search bracket for parameter p.
    p_rngs = np.array(calib_configs["p_init"])
    calib_configs["obs_data"] = np.loadtxt(calib_configs["obs_data_file"])
    calib_configs["rout_data"] = load_rout_data(calib_configs["rout_data_file"])
    # Default calibration range is the whole routed basin; an optional file
    # provides explicit 1-based cell indices (converted to 0-based here).
    calib_range = calib_configs["rout_data"]["basin"]\
        if calib_configs.get("calib_range_file") is None \
        else np.array(pd.read_table(calib_configs.get("calib_range_file"), header=None, sep="[\s,]"), dtype=int) - 1
    # pandas is used here because np.loadtxt does not accept a regex delimiter.
    calib_configs["calib_range"] = calib_range
    turns = calib_configs["turns"]
    max_itr = calib_configs["max_itr"]
    toler = calib_configs["toler"]
    BPC = calib_configs["BPC"]

    # Set run area.
    # Restrict the VIC domain mask to the routed basin cells.
    run_range = calib_configs["rout_data"]["basin"]
    set_nc_value(domain_file, "mask", 0)
    set_nc_value(domain_file, "mask", 1, mask=run_range)

    ###########################################################################
    # Create a single global file specially for calibration.
    ###########################################################################
    # Work on a deep copy so the user's project object is left untouched.
    proj_calib = copy.deepcopy(proj)
    proj_path = proj_calib.proj_params["proj_path"]
    if proj_path[-1] != "/":
        proj_path += "/"
    proj_calib.set_start_time(start_date)
    proj_calib.set_end_time(end_date)
    proj_calib.global_params["out_path"] = proj_path
    # Minimal daily output: only the two runoff components needed for routing.
    out_file_calib = OrderedDict({
        "out_file": "for_calibrate",
        "out_format": "NETCDF4",
        "compress": "FALSE",
        "aggfreq": "NDAYS 1",
        "out_var": ["OUT_RUNOFF",
                    "OUT_BASEFLOW"]
    })
    proj_calib.global_params["out_file"] = [out_file_calib]
    proj_calib.global_params["param_file"] = calib_configs["params_file"]
    proj_calib.global_params["domain_file"] = calib_configs["domain_file"]
    global_file = proj_path + "global_calibrate.txt"
    # VIC names its output file after the simulation start date.
    vic_out_file = "%sfor_calibrate.%04d-%02d-%02d.nc" % (proj_path,
                                                          start_date[0], start_date[1], start_date[2])
    proj_calib.write_global_file(global_file)
    calib_configs["global_file"] = global_file
    calib_configs["vic_out_file"] = vic_out_file

    ###########################################################################
    # Presets of auto-calibration.
    ###########################################################################
    param_names = ["infilt", "Ds", "Dsmax", "Ws", "d2", "d3"]
    lob = [0, 0, 0, 0, -1, -1]  # Left open boundary, -1 means not boundary.
    rob = [1, 1, -1, 1, -1, -1]  # Right open boundary.
    lcb = [-1, -1, -1, -1, 0.1, 0.1]  # Left close boundary. Params can get this value.
    rcb = [-1, -1, -1, -1, 10, 10]  # Right close boundary.
    # step_r caches the [e, NMSE, BIAS] of the current best parameter set so
    # the middle bracket point does not need to be re-run every time.
    step_r = None
    rs = [None, None, None]
    step_params = p_rngs[:, 1]  # current best value of each parameter
    log.info("###### Automatical calibration start... ######")
    log.info("\nTurns:\t%d\nmax itr:\t%d\ntoler:\t%.5f\nBPC:\t%.2f" % (turns, max_itr, toler, BPC))

    # MDZZ Algorithm for calibration of VIC.
    for t in range(turns):
        turn = t + 1
        log.info("Turns %d:" % turn)
        p_seq = range(6)
        for p in p_seq:
            param_name = param_names[p]
            log.info("Calibrate %s:" % param_name)
            # Write the current best values of all six parameters to file.
            [set_params(params_file, calib_range, i, step_params[i]) for i in range(6)]
            # Evaluate the three bracket points for parameter p.
            x = list(p_rngs[p, :])
            rs[0] = vic_try_with_param(calib_configs, p, x[0])
            rs[2] = vic_try_with_param(calib_configs, p, x[2])
            if step_r is not None:
                rs[1] = step_r
            else:
                rs[1] = vic_try_with_param(calib_configs, p, x[1])
            od = order(rs)  # Order of the results sorted by optimise level. The od[0]'s is the optimized.
            es, NMSEs, BIASs = [r[0] for r in rs], [r[1] for r in rs], [r[2] for r in rs]
            opt_E = es[od[0]]
            opt_val = x[od[0]]
            NMSE = NMSEs[od[0]]
            BIAS = BIASs[od[0]]
            step_params[p] = opt_val
            step_r = rs[od[0]]
            # Spread of the objective across the bracket; converged when small.
            de = es[od[2]] - es[od[0]]
            itr = 2  # Iteration times of single parameter.
            while de >= toler:
                if es[1] < es[0] and es[1] < es[2]:
                    # Minimum bracketed: shrink both sides toward the middle.
                    x[0] = (x[0] + x[1])/2
                    x[2] = (x[1] + x[2])/2
                    rs[0] = vic_try_with_param(calib_configs, p, x[0])
                    rs[2] = vic_try_with_param(calib_configs, p, x[2])
                elif es[0] < es[1] < es[2]:
                    # Objective decreasing to the left: slide the bracket left,
                    # clamping at the closed/open boundaries.
                    if lcb[p] > -1 and x[0] == lcb[p]:
                        x[2], rs[2] = x[1], rs[1]
                        x[1] = (x[2] + x[0])/2
                        rs[1] = vic_try_with_param(calib_configs, p, x[1])
                    else:
                        x[2], x[1], x[0] = x[1], x[0], x[0]-(x[2]-x[0])/2
                        rs[2], rs[1] = rs[1], rs[0]
                        if lcb[p] > -1 and x[0] < lcb[p]:
                            x[0] = lcb[p]
                        elif lob[p] > -1 and x[0] <= lob[p]:
                            x[0] = x[1] - 0.618 * (x[1]-lob[p])
                        rs[0] = vic_try_with_param(calib_configs, p, x[0])
                elif es[0] > es[1] > es[2]:
                    # Objective decreasing to the right: slide the bracket right.
                    if rcb[p] > -1 and x[2] == rcb[p]:
                        x[0], rs[0] = x[1], rs[1]
                        x[1] = (x[2] + x[0])/2
                        rs[1] = vic_try_with_param(calib_configs, p, x[1])
                    else:
                        x[0], x[1], x[2] = x[1], x[2], x[2]+(x[2]-x[0])/2
                        rs[0], rs[1] = rs[1], rs[2]
                        if x[2] > rcb[p] > -1:
                            # NOTE(review): this assigns the LEFT closed bound
                            # (lcb) to the right edge; looks like it should be
                            # rcb[p] — confirm with the author before changing.
                            x[2] = lcb[p]
                        elif x[2] >= rob[p] > -1:
                            x[2] = x[1] + 0.618 * (rob[p]-x[1])
                        rs[2] = vic_try_with_param(calib_configs, p, x[2])
                es, NMSEs, BIASs = [r[0] for r in rs], [r[1] for r in rs], [r[2] for r in rs]
                od = order(rs)
                opt_E = es[od[0]]
                opt_val = x[od[0]]
                NMSE = NMSEs[od[0]]
                BIAS = BIASs[od[0]]
                step_params[p] = opt_val
                step_r = rs[od[0]]
                de = es[od[2]] - es[od[0]]
                log.info("Iteration %d: param value=%.3f, E=%.3f, NSCE=%.3f, BIAS=%.3f" %
                         (itr, opt_val, opt_E, 1-NMSE, BIAS))
                log.debug("[%.3f, %.3f, %.3f, %.3f, %.3f, %.3f] => VIC => E:%.3f, NMSE:%.3f, BIAS:%.3f"
                          % (step_params[0], step_params[1], step_params[2], step_params[3],
                             step_params[4], step_params[5], opt_E, NMSE, BIAS))
                itr += 1
                if itr > max_itr:
                    break
            log.info("Parameter %s calibrated. Optimized value: %.3f, E: %.3f, NSCE: %.3f, BIAS: %.3f" %
                     (param_name, opt_val, opt_E, 1-NMSE, BIAS))

        # Reset range.
        # Shrink every bracket around the new best values (factor 0.618) for
        # the next turn, then clamp to the open/closed boundaries.
        n_rngs = (p_rngs[:, 2] - p_rngs[:, 0]) / 2 * 0.618
        p_rngs[:, 1] = step_params
        p_rngs[:, 0], p_rngs[:, 2] = step_params - n_rngs, step_params + n_rngs
        for i in range(6):
            if lob[i] > 0 and p_rngs[i, 0] <= lob[i]:
                p_rngs[i, 0] = step_params[i] - (step_params[i] - lob[i]) * 0.618
            if not rob[i] <= 0 and p_rngs[i, 2] >= rob[i]:
                p_rngs[i, 2] = step_params[i] + (rob[i] - step_params[i]) * 0.618
            if lcb[i] > 0 and p_rngs[i, 0] < lcb[i]:
                p_rngs[i, 0] = lcb[i]
            if not rcb[i] <= 0 and p_rngs[i, 2] > rcb[i]:
                p_rngs[i, 2] = rcb[i]

    log.info("######### calibration completed. #########")
    # Apply optimized parameters to file.
    [set_params(params_file, calib_range, i, step_params[i]) for i in range(6)]
    return step_params
########################################################################################################################
#
# Take the orders of an array like in R.
#
########################################################################################################################
def order(array):
    """Return the row indices that sort `array` ascending by its first
    column — the same contract as R's order()."""
    frame = pd.DataFrame(array)
    return frame.sort_values(by=0).index.tolist()
| gpl-3.0 |
shadowleaves/deep_learning | theano/rnn_minibatch.py | 1 | 30441 | """ Vanilla RNN
Parallelizes scan over sequences by using mini-batches.
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
# from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import cPickle as pickle
from collections import OrderedDict
logger = logging.getLogger(__name__)
import matplotlib.pyplot as plt
# Interactive plotting: figures update without blocking the training loop.
plt.ion()
# Compile theano functions with the C virtual-machine linker (fast runtime).
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
def xavier(shape):
    """Draw a 2-D weight matrix with Xavier/Glorot uniform initialization.

    Samples uniformly from [-a, a] with a = sqrt(6 / (fan_in + fan_out)),
    which keeps activation variance roughly constant across layers.
    """
    assert len(shape) == 2
    fan_in, fan_out = shape
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(low=-limit, high=limit, size=(fan_in, fan_out))
def numpy_floatX(data):
    """Cast *data* to a numpy array of theano's configured float dtype.

    Bug fix: the original body referenced the undefined names ``numpy`` and
    ``config`` (this module imports ``numpy as np`` and ``theano``), so any
    call raised NameError.
    """
    return np.asarray(data, dtype=theano.config.floatX)
# zeros = lambda shape: np.zeros(shape)
class RNN(object):
    """Elman-style recurrent neural network built as a theano symbolic graph.

    Supported output types:
        real    : linear output units, use mean-squared error
        binary  : binary output units, use cross-entropy error
        softmax : single softmax out, use cross-entropy error

    All trainable parameters live in one flat shared vector ``theta``; the
    individual weight matrices are reshaped views into it, so whole-model
    updates (e.g. momentum SGD or scipy optimizers) are single vector ops.
    """
    def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
                 output_type='real'):
        """Build the symbolic graph.

        :param input: symbolic tensor3 of inputs — assumes layout
            (time, batch, n_in); first scan dimension is time (TODO confirm)
        :param n_in: dimensionality of one input frame
        :param n_hidden: number of recurrent hidden units
        :param n_out: dimensionality of the output layer
        :param activation: elementwise hidden nonlinearity (theano op)
        :param output_type: 'real', 'binary' or 'softmax' (selects the loss)
        """
        self.input = input
        self.activation = activation
        self.output_type = output_type
        self.batch_size = T.iscalar()
        # theta is a vector of all trainable parameters
        # it represents the value of W, W_in, W_out, h0, bh, by
        theta_shape = n_hidden ** 2 + n_in * n_hidden + n_hidden * n_out + \
            n_hidden + n_hidden + n_out
        self.theta = theano.shared(value=np.zeros(theta_shape,
                                                  dtype=theano.config.floatX))
        # Parameters are reshaped views of theta
        param_idx = 0  # pointer to somewhere along parameter vector
        # recurrent weights as a shared variable
        self.W = self.theta[param_idx:(param_idx + n_hidden ** 2)].reshape(
            (n_hidden, n_hidden))
        self.W.name = 'W'
        W_init = xavier((n_hidden, n_hidden))
        param_idx += n_hidden ** 2
        # input to hidden layer weights
        self.W_in = self.theta[param_idx:(param_idx + n_in *
                                          n_hidden)].reshape((n_in, n_hidden))
        self.W_in.name = 'W_in'
        W_in_init = xavier((n_in, n_hidden))
        param_idx += n_in * n_hidden
        # hidden to output layer weights
        self.W_out = self.theta[param_idx:(param_idx + n_hidden *
                                           n_out)].reshape((n_hidden, n_out))
        self.W_out.name = 'W_out'
        W_out_init = xavier((n_hidden, n_out))
        param_idx += n_hidden * n_out
        # initial hidden state (trainable, shared across sequences)
        self.h0 = self.theta[param_idx:(param_idx + n_hidden)]
        self.h0.name = 'h0'
        h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        param_idx += n_hidden
        # hidden-layer bias
        self.bh = self.theta[param_idx:(param_idx + n_hidden)]
        self.bh.name = 'bh'
        bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        param_idx += n_hidden
        # output-layer bias
        self.by = self.theta[param_idx:(param_idx + n_out)]
        self.by.name = 'by'
        by_init = np.zeros((n_out,), dtype=theano.config.floatX)
        param_idx += n_out
        # every slice of theta must be accounted for exactly once
        assert(param_idx == theta_shape)
        # for convenience
        self.params = [self.W, self.W_in, self.W_out, self.h0, self.bh,
                       self.by]
        # shortcut to norms (for monitoring)
        self.l2_norms = {}
        for param in self.params:
            self.l2_norms[param] = T.sqrt(T.sum(param ** 2))
        # initialize parameters
        # DEBUG_MODE gives division by zero error when we leave parameters
        # as zeros
        self.theta.set_value(
            np.concatenate([x.ravel() for x in
                            (
                W_init, W_in_init, W_out_init, h0_init, bh_init, by_init)]))
        # velocity buffer for momentum updates (same shape as theta)
        self.theta_update = theano.shared(
            value=np.zeros(theta_shape, dtype=theano.config.floatX))
        # recurrent function (using tanh activation function) and arbitrary output
        # activation function
        def step(x_t, h_tm1):
            # one time step: new hidden state and (pre-activation) output
            h_t = self.activation(T.dot(x_t, self.W_in) +
                                  T.dot(h_tm1, self.W) + self.bh)
            y_t = T.dot(h_t, self.W_out) + self.by
            return h_t, y_t
        # the hidden state `h` for the entire sequence, and the output for the
        # entire sequence `y` (first dimension is always time)
        # Note the implementation of weight-sharing h0 across variable-size
        # batches using T.ones multiplying h0
        # Alternatively, T.alloc approach is more robust
        [self.h, self.y_pred], _ = theano.scan(
            step,
            sequences=self.input,
            outputs_info=[T.alloc(self.h0, self.input.shape[1],
                                  n_hidden), None])
        # outputs_info=[T.ones(shape=(self.input.shape[1],
        # self.h0.shape[0])) * self.h0, None])
        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = 0
        self.L1 += abs(self.W.sum())
        self.L1 += abs(self.W_in.sum())
        self.L1 += abs(self.W_out.sum())
        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = 0
        self.L2_sqr += (self.W ** 2).sum()
        self.L2_sqr += (self.W_in ** 2).sum()
        self.L2_sqr += (self.W_out ** 2).sum()
        if self.output_type == 'real':
            self.loss = lambda y: self.mse(y)
        elif self.output_type == 'binary':
            # push through sigmoid
            self.p_y_given_x = T.nnet.sigmoid(self.y_pred)  # apply sigmoid
            self.y_out = T.round(self.p_y_given_x)  # round to {0,1}
            self.loss = lambda y: self.nll_binary(y)
        elif self.output_type == 'softmax':
            # push through softmax, computing vector of class-membership
            # probabilities in symbolic form
            #
            # T.nnet.softmax will not operate on T.tensor3 types, only matrices
            # We take our n_steps x n_seq x n_classes output from the net
            # and reshape it into a (n_steps * n_seq) x n_classes matrix
            # apply softmax, then reshape back
            y_p = self.y_pred
            y_p_m = T.reshape(y_p, (y_p.shape[0] * y_p.shape[1], -1))
            y_p_s = T.nnet.softmax(y_p_m)
            self.p_y_given_x = T.reshape(y_p_s, y_p.shape)
            # compute prediction as class whose probability is maximal
            self.y_out = T.argmax(self.p_y_given_x, axis=-1)
            self.loss = lambda y: self.nll_multiclass(y)
        else:
            raise NotImplementedError
    def mse(self, y):
        """Mean squared error between predictions and targets *y*."""
        # error between output and target
        return T.mean((self.y_pred - y) ** 2)
    def nll_binary(self, y):
        """Mean binary cross-entropy of sigmoid outputs against targets *y*."""
        # negative log likelihood based on binary cross entropy error
        return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
    def nll_multiclass(self, y):
        """Mean multiclass negative log-likelihood against integer labels *y*."""
        # negative log likelihood based on multiclass cross entropy error
        #
        # Theano's advanced indexing is limited
        # therefore we reshape our n_steps x n_seq x n_classes tensor3 of probs
        # to a (n_steps * n_seq) x n_classes matrix of probs
        # so that we can use advanced indexing (i.e. get the probs which
        # correspond to the true class)
        # the labels y also must be flattened when we do this to use the
        # advanced indexing
        p_y = self.p_y_given_x
        p_y_m = T.reshape(p_y, (p_y.shape[0] * p_y.shape[1], -1))
        y_f = y.flatten(ndim=1)
        return -T.mean(T.log(p_y_m)[T.arange(p_y_m.shape[0]), y_f])
    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch
        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
        correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_out.ndim:
            raise TypeError('y should have the same shape as self.y_out',
                            ('y', y.type, 'y_out', self.y_out.type))
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_out, y))
        else:
            raise NotImplementedError()
class MetaRNN(object):
    def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
                 n_epochs=100, batch_size=100, L1_reg=0.00, L2_reg=0.00,
                 learning_rate_decay=1,
                 activation='tanh', output_type='real', final_momentum=0.9,
                 initial_momentum=0.5, momentum_switchover=5,
                 snapshot_every=None, snapshot_path='/tmp'):
        """Record hyper-parameters (coerced to plain python scalars so the
        object pickles cleanly) and build the underlying RNN graph.

        Momentum ramps from *initial_momentum* to *final_momentum* once the
        epoch count exceeds *momentum_switchover*.  If *snapshot_every* is an
        int, fit() pickles the model into *snapshot_path* every that many
        epochs.
        """
        self.n_in = int(n_in)
        self.n_hidden = int(n_hidden)
        self.n_out = int(n_out)
        self.learning_rate = float(learning_rate)
        self.learning_rate_decay = float(learning_rate_decay)
        self.n_epochs = int(n_epochs)
        self.batch_size = int(batch_size)
        self.L1_reg = float(L1_reg)
        self.L2_reg = float(L2_reg)
        self.activation = activation
        self.output_type = output_type
        self.initial_momentum = float(initial_momentum)
        self.final_momentum = float(final_momentum)
        self.momentum_switchover = int(momentum_switchover)
        if snapshot_every is not None:
            self.snapshot_every = int(snapshot_every)
        else:
            self.snapshot_every = None
        self.snapshot_path = snapshot_path
        # compile symbolic graph and prediction functions
        self.ready()
    def ready(self):
        """Build symbolic inputs, the RNN graph and the predict function(s)."""
        # input (where first dimension is time)
        self.x = T.tensor3(name='x')
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.tensor3(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.tensor3(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # now it is a matrix (T x n_seq)
            self.y = T.matrix(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # learning rate
        self.lr = T.scalar()
        # map the activation's name to a theano elementwise op
        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError
        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type)
        # compile prediction (and, where meaningful, probability) functions
        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(
                inputs=[self.x, ],
                outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(
                inputs=[self.x, ],
                outputs=T.round(
                    self.rnn.p_y_given_x),
                mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(
                inputs=[self.x, ],
                outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
def shared_dataset(self, data_xy, borrow=True):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=True)
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=True)
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
    def __getstate__(self):
        """ Return state sequence: (constructor params, flat weight vector)."""
        # NOTE(review): _get_params appears to come from sklearn's
        # BaseEstimator, whose import is commented out at module top —
        # confirm this does not raise AttributeError at pickle time.
        params = self._get_params()  # parameters set in constructor
        theta = self.rnn.theta.get_value()
        state = (params, theta)
        return state
    def _set_weights(self, theta):
        """ Set fittable parameters from weights sequence.

        *theta* is the flat parameter vector; the RNN's W/W_in/W_out/h0/bh/by
        are reshaped views into it, so one set_value updates everything.
        """
        self.rnn.theta.set_value(theta)
    def __setstate__(self, state):
        """ Set parameters from state sequence (inverse of __getstate__).
        """
        # NOTE(review): set_params appears to come from sklearn's
        # BaseEstimator (import commented out above) — verify unpickling.
        params, theta = state
        self.set_params(**params)
        self.ready()
        self._set_weights(theta)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
    def optional_output(self, train_set_x, show_norms=True, show_output=True):
        """ Produces some debugging output (parameter norms and a sample
        prediction on the first training sequence). """
        if show_norms:
            # log the L2 norm of each parameter group (W, W_in, W_out, ...)
            norm_output = []
            for param in self.rnn.params:
                norm_output.append('%s: %6.4f' % (param.name,
                                                  self.get_norms[param]()))
            logger.info("norms: {" + ', '.join(norm_output) + "}")
        if show_output:
            # show output for a single case
            if self.output_type == 'binary':
                output_fn = self.predict_proba
            else:
                output_fn = self.predict
            # slice out sequence 0 but keep the batch axis (n_seq == 1)
            logger.info(
                "sample output: " +
                str(output_fn(train_set_x.get_value(
                    borrow=True)[:, 0, :][:, np.newaxis, :]).flatten()))
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g)
for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
    def fit(self, X_train, Y_train, X_test=None, Y_test=None,
            validate_every=100, optimizer='sgd', compute_zero_one=False,
            show_norms=True, show_output=True):
        """ Fit model

        Pass in X_test, Y_test to compute test error and report during
        training.

        X_train : ndarray (T x n_in)
        Y_train : ndarray (T x n_out)

        validation_frequency : int
            in terms of number of epochs

        optimizer : string
            Optimizer type.
            Possible values:
                'sgd' : batch stochastic gradient descent
                'cg' : nonlinear conjugate gradient algorithm
                    (scipy.optimize.fmin_cg)
                'bfgs' : quasi-Newton method of Broyden, Fletcher, Goldfarb,
                    and Shanno (scipy.optimize.fmin_bfgs)
                'l_bfgs_b' : Limited-memory BFGS (scipy.optimize.fmin_l_bfgs_b)
            NOTE(review): only 'sgd' is implemented in this method; any
            other value raises NotImplementedError.

        compute_zero_one : bool
            in the case of binary output, compute zero-one error in addition to
            cross-entropy error

        show_norms : bool
            Show L2 norms of individual parameter groups while training.

        show_output : bool
            Show the model output on first training case while training.
        """
        if X_test is not None:
            assert(Y_test is not None)
            self.interactive = True
            test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
        else:
            self.interactive = False
        train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
        if compute_zero_one:
            assert(self.output_type == 'binary' or
                   self.output_type == 'softmax')
        # compute number of minibatches for training
        # note that cases are the second dimension, not the first
        # NOTE(review): the comment above and the use of shape[0] below seem
        # to disagree — confirm which axis indexes the examples here.
        n_train = train_set_x.get_value(borrow=True).shape[0]
        n_train_batches = int(np.ceil(1.0 * n_train / self.batch_size))
        if self.interactive:
            n_test = test_set_x.get_value(borrow=True).shape[0]
            n_test_batches = int(np.ceil(1.0 * n_test / self.batch_size))
        # validate_every is specified in terms of epochs
        validation_frequency = validate_every * n_train_batches
        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logger.info('... building the model')
        index = T.lscalar('index')    # index to a [mini]batch
        n_ex = T.lscalar('n_ex')      # total number of examples
        # learning rate (may change)
        l_r = T.scalar('l_r', dtype=theano.config.floatX)
        mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
        cost = self.rnn.loss(self.y) \
            + self.L1_reg * self.rnn.L1 \
            + self.L2_reg * self.rnn.L2_sqr
        # Proper implementation of variable-batch size evaluation
        # Note that classifier.errors() returns the mean error
        # But the last batch may be a smaller size
        # So we keep around the effective_batch_size (whose last element may
        # be smaller than the rest)
        # And weight the reported error by the batch_size when we average
        # Also, by keeping batch_start and batch_stop as symbolic variables,
        # we make the theano function easier to read
        batch_start = index * self.batch_size
        batch_stop = T.minimum(n_ex, (index + 1) * self.batch_size)
        effective_batch_size = batch_stop - batch_start
        get_batch_size = theano.function(
            inputs=[index, n_ex],
            outputs=effective_batch_size)
        compute_train_error = theano.function(
            inputs=[index, n_ex],
            outputs=self.rnn.loss(self.y),
            givens={self.x: train_set_x[batch_start:batch_stop, :],
                    self.y: train_set_y[batch_start:batch_stop, :]},
            mode=mode)
        if compute_zero_one:
            compute_train_zo = theano.function(
                inputs=[index, n_ex],
                outputs=self.rnn.errors(self.y),
                givens={self.x: train_set_x[batch_start:batch_stop, :],
                        self.y: train_set_y[batch_start:batch_stop, :]},
                mode=mode)
        if self.interactive:
            compute_test_error = theano.function(
                inputs=[index, n_ex],
                outputs=self.rnn.loss(self.y),
                givens={self.x: test_set_x[batch_start:batch_stop, :],
                        self.y: test_set_y[batch_start:batch_stop, :]},
                mode=mode)
            if compute_zero_one:
                compute_test_zo = theano.function(
                    inputs=[index, n_ex],
                    outputs=self.rnn.errors(
                        self.y),
                    givens={self.x: test_set_x[batch_start:batch_stop, :],
                            self.y: test_set_y[batch_start:batch_stop, :]},
                    mode=mode)
        # per-parameter L2-norm accessors used by optional_output()
        self.get_norms = {}
        for param in self.rnn.params:
            self.get_norms[param] = theano.function(
                inputs=[],
                outputs=self.rnn.l2_norms[param], mode=mode)
        # compute the gradient of cost with respect to theta using BPTT
        gtheta = T.grad(cost, self.rnn.theta)
        if optimizer == 'sgd':
            updates = OrderedDict()
            theta = self.rnn.theta
            theta_update = self.rnn.theta_update
            # careful here, update to the shared variable
            # cannot depend on an updated other shared variable
            # since updates happen in parallel
            # so we need to be explicit
            upd = mom * theta_update - l_r * gtheta
            updates[theta_update] = upd
            updates[theta] = theta + upd
            # compiling a Theano function `train_model` that returns the
            # cost, but in the same time updates the parameter of the
            # model based on the rules defined in `updates`
            train_model = theano.function(
                inputs=[index, n_ex, l_r, mom],
                outputs=cost,
                updates=updates,
                givens={self.x: train_set_x[batch_start:batch_stop, :],
                        self.y: train_set_y[batch_start:batch_stop, :]},
                mode=mode)
            ###############
            # TRAIN MODEL #
            ###############
            logger.info('... training')
            epoch = 0
            while (epoch < self.n_epochs):
                epoch = epoch + 1
                # momentum ramps up after momentum_switchover epochs
                effective_momentum = self.final_momentum \
                    if epoch > self.momentum_switchover \
                    else self.initial_momentum
                for minibatch_idx in xrange(n_train_batches):
                    minibatch_avg_cost = train_model(
                        minibatch_idx, n_train,
                        self.learning_rate,
                        effective_momentum)
                    # iteration number (how many weight updates have we made?)
                    # epoch is 1-based, index is 0 based
                    iter = (epoch - 1) * n_train_batches + minibatch_idx + 1
                    if iter % validation_frequency == 0:
                        # compute loss on training set
                        train_losses = [compute_train_error(i, n_train)
                                        for i in xrange(n_train_batches)]
                        train_batch_sizes = [get_batch_size(i, n_train)
                                             for i in xrange(n_train_batches)]
                        # weight per-batch means by the (possibly smaller
                        # final) batch size
                        this_train_loss = np.average(train_losses,
                                                     weights=train_batch_sizes)
                        if compute_zero_one:
                            train_zero_one = [compute_train_zo(i, n_train)
                                              for i in xrange(n_train_batches)]
                            this_train_zero_one = np.average(
                                train_zero_one,
                                weights=train_batch_sizes)
                        if self.interactive:
                            test_losses = [
                                compute_test_error(i, n_test)
                                for i in xrange(n_test_batches)]
                            test_batch_sizes = [
                                get_batch_size(i, n_test)
                                for i in xrange(n_test_batches)]
                            this_test_loss = np.average(
                                test_losses,
                                weights=test_batch_sizes)
                            if compute_zero_one:
                                test_zero_one = [
                                    compute_test_zo(i, n_test)
                                    for i in xrange(n_test_batches)]
                                this_test_zero_one = np.average(
                                    test_zero_one,
                                    weights=test_batch_sizes)
                            if compute_zero_one:
                                logger.info(
                                    'epoch %i, mb %i/%i, tr loss %f, '
                                    'tr zo %f, te loss %f '
                                    'te zo %f lr: %f' %
                                    (epoch, minibatch_idx + 1,
                                     n_train_batches,
                                     this_train_loss, this_train_zero_one,
                                     this_test_loss, this_test_zero_one,
                                     self.learning_rate))
                            else:
                                logger.info(
                                    'epoch %i, mb %i/%i, tr loss %f '
                                    'te loss %f lr: %f' %
                                    (epoch, minibatch_idx + 1, n_train_batches,
                                     this_train_loss, this_test_loss,
                                     self.learning_rate))
                        else:
                            if compute_zero_one:
                                logger.info(
                                    'epoch %i, mb %i/%i, train loss %f'
                                    ' train zo %f '
                                    'lr: %f' % (epoch,
                                                minibatch_idx + 1,
                                                n_train_batches,
                                                this_train_loss,
                                                this_train_zero_one,
                                                self.learning_rate))
                            else:
                                logger.info(
                                    'epoch %i, mb %i/%i, train loss %f'
                                    ' lr: %f' % (epoch,
                                                 minibatch_idx + 1,
                                                 n_train_batches,
                                                 this_train_loss,
                                                 self.learning_rate))
                        self.optional_output(train_set_x, show_norms,
                                             show_output)
                self.learning_rate *= self.learning_rate_decay
                if self.snapshot_every is not None:
                    if (epoch + 1) % self.snapshot_every == 0:
                        date_obj = datetime.datetime.now()
                        date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
                        class_name = self.__class__.__name__
                        fname = '%s.%s-snapshot-%d.pkl' % (class_name,
                                                           date_str, epoch + 1)
                        fabspath = os.path.join(self.snapshot_path, fname)
                        self.save(fpath=fabspath)
            ###############
            # TRAIN MODEL #
            ###############
            # NOTE(review): this second "TRAIN MODEL" banner and the timing
            # scaffolding below look like leftovers from a scipy-optimizer
            # code path — nothing runs between the two time.clock() calls.
            logger.info('... training')
            # using scipy conjugate gradient optimizer
            # import scipy.optimize
            # logger.info("Optimizing using %s..." % of.__name__)
            start_time = time.clock()
            # keep track of epochs externally
            # these get updated through callback
            self.epoch = 0
            end_time = time.clock()
            print "Optimization time: %f" % (end_time - start_time)
        else:
            raise NotImplementedError
| mit |
naoyak/Agile_Data_Code_2 | ch07/train_sklearn_model.py | 1 | 5282 | import sys, os, re
sys.path.append("lib")
import utils
import numpy as np
import sklearn
import iso8601
import datetime
print("Imports loaded...")
# Load and check the size of our training data. May take a minute.
print("Original JSON file size: {:,} Bytes".format(os.path.getsize("data/simple_flight_delay_features.jsonl")))
training_data = utils.read_json_lines_file('data/simple_flight_delay_features.jsonl')
print("Training items: {:,}".format(len(training_data)))  # 5,714,008
print("Data loaded...")
# Inspect a record before we alter them
print("Size of training data in RAM: {:,} Bytes".format(sys.getsizeof(training_data)))  # 50MB
print(training_data[0])
# # Sample down our training data at first...
# sampled_training_data = training_data#np.random.choice(training_data, 1000000)
# print("Sampled items: {:,} Bytes".format(len(training_data)))
# print("Data sampled...")
# Separate our results from the rest of the data, vectorize and size up
results = [record['ArrDelay'] for record in training_data]
results_vector = np.array(results)
sys.getsizeof(results_vector)  # 45,712,160 Bytes
print("Results vectorized...")
# Remove the two delay fields and the flight date from our training data
# (pop mutates the records in place so the label never leaks into features)
for item in training_data:
    item.pop('ArrDelay', None)
    item.pop('FlightDate', None)
print("ArrDelay and FlightDate removed from training data...")
# Must convert datetime strings to unix times
for item in training_data:
    if isinstance(item['CRSArrTime'], str):
        dt = iso8601.parse_date(item['CRSArrTime'])
        unix_time = int(dt.timestamp())
        item['CRSArrTime'] = unix_time
    if isinstance(item['CRSDepTime'], str):
        dt = iso8601.parse_date(item['CRSDepTime'])
        unix_time = int(dt.timestamp())
        item['CRSDepTime'] = unix_time
print("Datetimes converted to unix times...")
# Use DictVectorizer to convert feature dicts to vectors
from sklearn.feature_extraction import DictVectorizer
print("Original dimensions: [{:,}]".format(len(training_data)))
vectorizer = DictVectorizer()
# one-hot encodes string features, passes numeric features through (sparse)
training_vectors = vectorizer.fit_transform(training_data)
print("Size of DictVectorized vectors: {:,} Bytes".format(training_vectors.data.nbytes))
print("Training data vectorized...")
from sklearn.model_selection import train_test_split
# hold out 10% as a test set; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(
    training_vectors,
    results_vector,
    test_size=0.1,
    random_state=43
)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
print("Test train split performed...")
# Train a regressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.metrics import median_absolute_error, r2_score
print("Regressor library and metrics imported...")
regressor = LinearRegression()
print("Regressor instantiated...")
from sklearn.ensemble import GradientBoostingRegressor
# Bug fix: the original assigned the *class* (missing parentheses), so
# `regressor` was not an estimator instance as the message claims.
regressor = GradientBoostingRegressor()
print("Swapped gradient boosting trees for linear regression!")
# Lets go back for now...
regressor = LinearRegression()
print("Swapped back to linear regression!")
regressor.fit(X_train, y_train)
print("Regressor fitted...")
predicted = regressor.predict(X_test)
print("Predictions made for X_test...")
# Definitions from http://scikit-learn.org/stable/modules/model_evaluation.html
from sklearn.metrics import median_absolute_error, r2_score
# Median absolute error is the median of all absolute differences between the target and the prediction.
# Less is better, more indicates a high error between target and prediction.
medae = median_absolute_error(y_test, predicted)
print("Median absolute error: {:.3g}".format(medae))
# R2 score is the coefficient of determination. Ranges from 1-0, 1.0 is best, 0.0 is worst.
# Measures how well future samples are likely to be predicted.
r2 = r2_score(y_test, predicted)
print("r2 score: {:.3g}".format(r2))
# Plot outputs, compare actual vs predicted values
# import matplotlib.pyplot as plt
#
# plt.scatter(
#     y_test,
#     predicted,
#     color='blue',
#     linewidth=1
# )
#
# plt.xticks(())
# plt.yticks(())
#
# plt.show()
#
# Persist model using pickle
#
print("Testing model persistance...")
import pickle
project_home = os.environ["PROJECT_HOME"]
# Dump the model itself
regressor_path = "{}/data/sklearn_regressor.pkl".format(project_home)
regressor_bytes = pickle.dumps(regressor)
# Improvement: context managers close (and flush) every handle even on
# error; the original opened six files and never closed any of them.
with open(regressor_path, 'wb') as model_f:
    model_f.write(regressor_bytes)
# Dump the DictVectorizer that vectorizes the features
vectorizer_path = "{}/data/sklearn_vectorizer.pkl".format(project_home)
vectorizer_bytes = pickle.dumps(vectorizer)
with open(vectorizer_path, 'wb') as vectorizer_f:
    vectorizer_f.write(vectorizer_bytes)
# Load the model itself
with open(regressor_path, 'rb') as model_f:
    model_bytes = model_f.read()
regressor = pickle.loads(model_bytes)
# Load the DictVectorizer
with open(vectorizer_path, 'rb') as vectorizer_f:
    vectorizer_bytes = vectorizer_f.read()
vectorizer = pickle.loads(vectorizer_bytes)
#
# Persist model using sklearn.externals.joblib
#
from sklearn.externals import joblib
# Dump the model and vectorizer
joblib.dump(regressor, regressor_path)
joblib.dump(vectorizer, vectorizer_path)
# Load the model and vectorizer
regressor = joblib.load(regressor_path)
vectorizer = joblib.load(vectorizer_path)
| mit |
daniel-vainsencher/regularized_weighting | src/simpleInterface.py | 1 | 6881 | from numpy import array, inf, ones, zeros
import matplotlib.pyplot as plt
from sklearn.svm import SVC
#from sklearn.metrics import zero_one_score
import time
import alternatingAlgorithms as aa
import weightedModelTypes as wmt
from minL2PenalizedLossOverSimplex import penalizedMultipleWeightedLoss2, weightsForLosses, weightsCombinedForAM, gOfTs
#from exploreSvm import hinge_losses
#from svmutil import svm_train, svm_problem, svm_predict
#from svm import svm_parameter, SVC
def optimize(data, model_class, alpha, eta, model_parameters=None,
             dual_optimizer='coordinate-wise', dual_to_primal='partition',
             primal_optimizer='fista'):
    """Alternate between solving for weights (dual) and refitting models.

    NOTE(review): dual_optimizer / dual_to_primal / primal_optimizer are
    accepted but never used in the body — confirm whether they are
    placeholders for a planned API.

    Returns (states, t): the k fitted weighted models and the dual variables.
    """
    k = eta.shape[0]
    L = model_class.randomModelLosses(data, k, modelParameters=model_parameters)
    _, n = L.shape
    t = zeros(k)
    W = ones((k, n)) / n
    pvals = []
    dvals = []
    # fixed number of alternating-minimization rounds
    for i in range(10):
        def report(label, W, l2, ts):
            # callback: record timestamped primal/dual objective values
            if W is not None:
                pvals.append((time.time(), penalizedMultipleWeightedLoss2(L, W, alpha, eta=eta)))
            elif ts is not None:
                dvals.append((time.time(), gOfTs(L, alpha, ts, eta)))
        W, t = weightsCombinedForAM(L, alpha, eta, ts0=t, W0=W, report=report)  # solve for weights and t, with a relative duality gap or iteration complexity based stopping rules
        states = [model_class(data, w, model_parameters) for w in W]
        L = array([s.squaredLosses() for s in states])
    dtimes, dvs = zip(*dvals)
    ptimes, pvs = zip(*pvals)
    #plt.plot(dtimes, dvs, label='dual')
    #plt.plot(ptimes, pvs, label='primal')
    #plt.legend()
    #plt.show()
    return states, t
def clustering(X, k, alpha, n_init=10):
    """
    Cluster the points in columns of X into k clusters.

    Keeps the best of *n_init* random restarts (lowest joint penalized loss).
    Returns (bestCentroids, labels); a label of -1 marks points whose weight
    is numerically zero under every model.
    """
    minJointLoss = inf
    # python 2: xrange
    for i in xrange(n_init):
        #initialStates = aa.jointlyPenalizedInitialStates(X.T, wmt.ClusteringState, alpha, k, dualityGapGoal=1e-5)
        #finalStates = aa.learnJointlyPenalizedMultipleModels(initialStates, alpha, maxSteps=100, dualityGapGoal=1e-2)
        eta = ones(k) / k
        finalStates, t = optimize(X.T, wmt.ClusteringState, alpha, eta)
        centroids = [s.center for s in finalStates]
        W = array([s.weights for s in finalStates])
        L = array([s.squaredLosses() for s in finalStates])
        jointLoss = penalizedMultipleWeightedLoss2(L, W, alpha)
        if jointLoss < minJointLoss:
            minJointLoss = jointLoss
            # assign each point to its highest-weight model
            highestWeights = W.argmax(0)
            zeroWeights = W.max(0) < 10 ** -9
            labels = highestWeights
            labels[zeroWeights] = -1
            bestCentroids = centroids
    return (bestCentroids, labels)
def mixtureGaussians(X, k, alpha, n_init=10):
    """
    Find a mixture of k Gaussians to explain points.

    Keeps the best of *n_init* random restarts (lowest joint penalized loss).
    Returns (bestMeans, bestVariances, labels); -1 labels points with
    numerically zero weight under every component.
    """
    minJointLoss = inf
    # python 2: xrange
    for i in xrange(n_init):
        initialStates = aa.jointlyPenalizedInitialStates(X.T, wmt.ScalarGaussianState, alpha, k, dualityGapGoal=1e-5)
        finalStates = aa.learnJointlyPenalizedMultipleModels(initialStates, alpha, maxSteps=10, dualityGapGoal=1e-2)
        means = [s.mean for s in finalStates]
        W = array([s.weights for s in finalStates])
        L = array([s.squaredLosses() for s in finalStates])
        jointLoss = penalizedMultipleWeightedLoss2(L, W, alpha)
        if jointLoss < minJointLoss:
            minJointLoss = jointLoss
            # label each point by its highest-weight component
            highestWeights = W.argmax(0)
            zeroWeights = W.max(0) < 10 ** -9
            labels = highestWeights
            labels[zeroWeights] = -1
            bestMeans = means
            bestVariances = [s.variance for s in finalStates]
    return (bestMeans, bestVariances, labels)
def linearRegressionClustering(X, Y, k, alpha, regularizationStrength, n_init=10):
    """Fit k weighted linear-regression models to (X, Y).

    Keeps the best of *n_init* random restarts (lowest joint penalized loss).
    Returns (bestLinearModels, associations); association -1 marks points
    with numerically zero weight under every model.
    """
    minJointLoss = inf
    # python 2: xrange
    for i in xrange(n_init):
        initialStates = aa.jointlyPenalizedInitialStates((X.T, Y), wmt.MultiLinearRegressionState, alpha, k=k,
                                                         modelParameters={
                                                             'regularizationStrength': regularizationStrength})
        finalStates = aa.learnJointlyPenalizedMultipleModels(initialStates, alpha, maxSteps=10, dualityGapGoal=1e-2)
        linearModels = [s.r for s in finalStates]
        W = array([s.weights for s in finalStates])
        L = array([s.squaredLosses() for s in finalStates])
        jointLoss = penalizedMultipleWeightedLoss2(L, W, alpha)
        if jointLoss < minJointLoss:
            minJointLoss = jointLoss
            highestWeights = W.argmax(0)
            zeroWeights = W.max(0) < 10 ** -9
            associations = highestWeights
            associations[zeroWeights] = -1
            bestLinearModels = linearModels
    return (bestLinearModels, associations)
def sk_hinge_losses(clf, X, Y):
    """Per-sample hinge losses max(0, 1 - y * f(x)) for a fitted classifier.

    *clf* must expose scikit-learn's ``decision_function``; labels Y are
    expected in {-1, +1}.
    """
    scores = clf.decision_function(X)[:, 0]
    losses = 1 - Y * scores
    losses[losses <= 0] = 0
    return losses
def sk_weightedCSVMrbf(Xain, Yain, Xst, Yst, alpha, C, gamma, n_init=10):
    """Iteratively reweighted C-SVM (RBF kernel) via scikit-learn.

    Alternates fitting an SVC on weighted samples with recomputing weights
    from the training hinge losses; reports test loss each round.
    """
    clf = SVC(C=C, kernel='rbf', gamma=gamma)
    n_train_samples, n_dims = Xain.shape
    w = ones(n_train_samples) / n_train_samples
    #clf.fit(Xain, Yain)
    # tiny epsilon keeps every sample weight strictly positive
    clf.fit(Xain, Yain, sample_weight=w * C + 0.0000000001)
    for _ in range(n_init):
        test_hlosses = sk_hinge_losses(clf, Xst, Yst)
        # NOTE(review): zero_one_score's import is commented out at module
        # top (and it was removed from sklearn) — this line would raise
        # NameError if executed; confirm the intended replacement.
        mistakeRatio = zero_one_score(Yst, clf.predict(Xst))
        print "mean loss on test data: %s %% correct on test data: %s" % (test_hlosses.mean(), mistakeRatio)
        train_hlosses = sk_hinge_losses(clf, Xain, Yain)
        w = weightsForLosses(train_hlosses, alpha)
        clf.fit(Xain, Yain, sample_weight=w * C + 0.0000000001)
    return clf, w
def hinge_losses(m, x, y):
    """Per-sample hinge losses for a libsvm model *m* on data (x, y).

    NOTE(review): svm_predict comes from the commented-out svmutil import at
    module top — this function is not runnable as the file stands.
    """
    p_label, p_acc, p_val = svm_predict(y, x, m)
    # margin = y * decision_value; hinge loss = max(0, 1 - margin)
    margins = array([(ay * av) for (ay, [av]) in zip(y, p_val)])
    hlosses = 1 - margins
    hlosses[hlosses < 0] = 0
    return hlosses
def arrayToLibSvm(X):
    """Convert a 2-D array into libsvm's list-of-{feature_index: value} rows."""
    rows = X.tolist()
    n_features = len(rows[0])
    feature_ids = range(n_features)
    return [dict(zip(feature_ids, row)) for row in rows]
def weightedCSVMrbf(Xain, Yain, Xst, Yst, alpha, C, gamma, n_init=10):
    """Iteratively re-weighted RBF-kernel C-SVM using the libsvm bindings.

    Mirrors sk_weightedCSVMrbf but goes through libsvm directly: the data
    is converted to libsvm's dict format and the per-sample weights W are
    recomputed from the training hinge losses on each of the n_init rounds.

    Returns the final libsvm model and the last per-sample weight list.
    """
    xain = arrayToLibSvm(Xain)
    yain = list(Yain)
    xst = arrayToLibSvm(Xst)
    yst = list(Yst)
    n_train_samples, n_dims = Xain.shape
    param = svm_parameter('-c %f -g %f' % (C, gamma))
    # Uniform initial weights, pre-scaled by C.
    W = list(C * ones(n_train_samples) / n_train_samples)
    for _ in range(n_init):
        # NOTE(review): svm_problem(W, y, x) — this 3-argument form is the
        # instance-weighted variant of libsvm; confirm the binding in use
        # accepts per-sample weights in the first position.
        prob = svm_problem(W, yain, xain)
        m = svm_train(prob, param)
        test_hlosses = hinge_losses(m, xst, yst)
        p_labels, _, _ = svm_predict(yst, xst, m)
        # Despite the name, the printed quantity is labeled "accuracy".
        mistakeRatio = zero_one_score(Yst, array(p_labels))
        print "mean loss on test data: %s; accuracy on test data: %s" % (test_hlosses.mean(), mistakeRatio)
        # Re-weight training samples according to their hinge losses.
        train_hlosses = hinge_losses(m, xain, yain)
        W = list(C * weightsForLosses(train_hlosses, alpha))
    return m, W
| bsd-2-clause |
endangeredoxen/pywebify | setup.py | 1 | 4007 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Read the package version from pywebify/version.txt.  The handle is named
# `version_file` (the original shadowed the builtin `input`), and the line
# is stripped so the version carries no trailing newline — an unstripped
# readline would otherwise corrupt the download_url built from it below.
with open(path.join('pywebify', 'version.txt'), 'r') as version_file:
    __version__ = version_file.readlines()[0].strip()
setup(
    name='pywebify',
    # Single-sourced from pywebify/version.txt (read into __version__ above).
    version=__version__,
    description='Browser-based html/image file report builder',
    long_description='Browser-based html/image file report builder',
    # Project homepage and versioned source tarball.
    url='https://github.com/endangeredoxen/pywebify',
    download_url='https://github.com/endangeredoxen/pywebify/archive/v%s.tar.gz' % __version__,
    # Author details
    author='Steve Nicholes',
    author_email='endangeredoxen@users.noreply.github.com',
    license='GPLv3',
    keywords=['data', 'web report'],
    packages=find_packages(exclude=['contrib', 'docs']),
    # Run-time dependencies installed by pip alongside the package.
    install_requires=['pandas','numpy','natsort','fivecentfileio'],
    # No optional dependency groups are defined.
    extras_require={
    },
    # Data files shipped inside the package (config, images, js, templates).
    # With Python <= 2.6 these must also be listed in MANIFEST.in.
    package_data={
        'pywebify': ['config.ini', 'img/*', 'js/*', 'templates/css/*',
                     'templates/html/*', 'templates/jinja/*', 'setup.txt', 'version.txt'],
    },
)
| gpl-2.0 |
berkeley-stat222/mousestyles | mousestyles/path_diversity/path_index.py | 3 | 2301 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
def path_index(movement, stop_threshold, min_path_length):
    r"""
    Return a list object containing start and end indices
    for a specific movement. Each element in the list is
    a list containing two indices: the first element is
    the start index and the second element is the end index.

    Parameters
    ----------
    movement : pandas.DataFrame
        CT, CX, CY coordinates and homebase status
        for the unique combination of strain, mouse and day

    stop_threshold : float
        positive number indicating the path cutoff criteria;
        if the time difference between two observations is
        less than this threshold, they belong to the same path

    min_path_length : int
        positive integer; paths with this many observations or
        fewer are dropped

    Returns
    -------
    paths index : a list containing the indices for all paths

    Examples
    --------
    >>> movement = data.load_movement(1, 2, 1)
    >>> paths = path_index(movement, 1, 1)[:5]
    >>> paths
    [[0, 2], [6, 8], [107, 113], [129, 131], [144, 152]]
    """
    # Validate inputs: values must be positive, min_path_length an int.
    if stop_threshold <= 0 or min_path_length <= 0:
        raise ValueError("Input values need to be positive")
    if type(min_path_length) != int:
        raise TypeError("min_path_length needs to be integer")

    # Time stamps as a plain array (Series.ravel is deprecated in pandas 2+).
    T = np.asarray(movement['t'])
    # Time differences between consecutive observations.
    TD = np.diff(T)

    path = []
    i = 0
    while i < len(TD):
        start_index = i
        # Extend the path while consecutive observations are close in time.
        while TD[i] < stop_threshold:
            i += 1
            if i == len(TD):
                break
        end_index = i
        # A zero-length run is not a path.  (The original used a bare
        # `next` here, which is a no-op expression — not `continue`.)
        if start_index != end_index:
            path.append([start_index, end_index])
        i += 1

    # Keep only paths with more than min_path_length observations.
    return [p for p in path if (p[1] - p[0]) > min_path_length]
| bsd-2-clause |
glouppe/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 4 | 3761 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
# Problem sizes for this example: n_samples and n_features are echoed in
# the status messages, n_features also caps the CountVectorizer vocabulary,
# and n_topics / n_top_words control the models and the printed report.
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
    """Print, for each component of *model*, its n_top_words strongest terms."""
    for topic_idx, topic in enumerate(model.components_):
        print("Topic #%d:" % topic_idx)
        # Indices of the largest weights, in descending order.
        top = topic.argsort()[:-n_top_words - 1:-1]
        print(" ".join(feature_names[i] for i in top))
    print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
                             remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data
print("done in %0.3fs." % (time() - t0))

# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,  # max_features=n_features,
                                   stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))

# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
                                stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))

# Fit the NMF model.
# FIX: a space was missing between the two adjacent string literals below,
# which printed "features,n_samples=..." as one word.
print("Fitting the NMF model with tf-idf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))

print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)

print("Fitting LDA models with tf features, n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
                                learning_method='online', learning_offset=50.,
                                random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))

print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
reychil/project-alpha-1 | code/utils/scripts/multi_regression_script.py | 1 | 7291 | # multi_regression_script.py
# In this file we will be creating multiple regressions using the the glm
# function. Moreover, the features added with be: seperating the conditions
# from each other (i.e. x_1 = cond1 HRF, x_2 = cond2 HRF, and x_3 = cond3 HRF.
# I will be running it with np.convolve and convolution_specialized.
# Steps:
# 1. Libraries, Location and Data
# 2. X matrix creation, specifically creation of column vectors for X matrix
# (a) np.convolve and (b) convolution_specialized
# 3. use glm to generate a linear regression
###################################
# 1. Libraries, Location and Data #
###################################
################
# a. Libraries #
################
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd # new
import sys # instead of os
import scipy.stats
from scipy.stats import gamma
import os
################
################
# b. Locations #
################
################
# Directory layout (relative to this script) for subject001 of ds009.
location_of_project = "../../../"
location_of_data = location_of_project + "data/ds009/"
location_of_subject001 = location_of_data + "sub001/"
location_of_functions = "../functions/"
location_of_our_data = location_of_project + "data/our_data/"
condition_location = location_of_subject001 + "model/model001/onsets/task001_run001/"
bold_location = location_of_subject001 + "BOLD/task001_run001/"
location_to_class_data = location_of_project + "data/ds114/"
location_of_images = location_of_project + "images/"

# Make the project helper modules and data directories importable.
sys.path.append(location_of_functions)    # custom analysis functions
sys.path.append(bold_location)            # BOLD data
sys.path.append(condition_location)       # condition (onset) files
sys.path.append(location_to_class_data)   # class dataset (ds114)
################
# d. functions #
################
# i. importing created convolution function for event-related fMRI functions:
from event_related_fMRI_functions import convolution, hrf_single
from event_related_fMRI_functions import convolution_specialized
# ii. importing events2neural for np.convolve built-in function
from stimuli import events2neural
# iii. import glm_multiple for multiple regression
from glm import glm_multiple, glm_diagnostics
# iv. import image viewing tool
from Image_Visualizing import present_3d
###########
# e. data #
###########
# Subject001 BOLD 4-D image; the first 6 volumes are dropped
# (presumably a scanner settling period — TODO confirm against protocol).
img = nib.load(location_of_subject001 + "BOLD/task001_run001/" + "bold.nii")
data = img.get_data()
data = data[..., 6:]
num_voxels = np.prod(data.shape[:-1])

# Behavioral condition (onset) files for the three task conditions,
# used by convolution_specialized below.
cond1 = np.loadtxt(condition_location + "cond001.txt")
cond2 = np.loadtxt(condition_location + "cond002.txt")
cond3 = np.loadtxt(condition_location + "cond003.txt")
########################
# 2. X (design) Matrix Creation — one HRF regressor per condition
########################

###################
# (a) np.convolve #
###################
TR = 2
tr_times = np.arange(0, 30, TR)
# HRF sampled on the TR grid, used as the convolution kernel.
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols = data.shape[-1]  # number of time slices
X_np = np.ones((n_vols, 4))  # column 0 is the intercept
cond_string = ["cond001.txt", "cond002.txt", "cond003.txt"]
for i, name in enumerate(cond_string):
    # (sic "nueral") on/off neural prediction time course for this condition
    nueral_prediction = events2neural(condition_location + name, TR, n_vols)
    hrf_long = np.convolve(nueral_prediction, hrf_at_trs)
    # Trim the convolution tail so the regressor matches the scan length.
    X_np[:, i + 1] = hrf_long[:-(len(hrf_at_trs) - 1)]
all_tr_times = np.arange(n_vols) * TR

###############################
# (b) convolution_specialized #
###############################
# Same design, but using the project's event-related convolution that
# works from the raw onset times instead of a resampled on/off course.
X_my = np.ones((n_vols, 4))
conds = [cond1[:, 0], cond2[:, 0], cond3[:, 0]]
for i, cond in enumerate(conds):
    X_my[:, i + 1] = convolution_specialized(cond, np.ones(len(cond)), hrf_single, all_tr_times)
##########
# 3. GLM — one beta map per regressor, for both designs
##########

###################
# (a) np.convolve #
###################
B_np, junk = glm_multiple(data, X_np)

###############################
# (b) convolution_specialized #
###############################
B_my, junk = glm_multiple(data, X_my)
#############
# 4. Review #
#############
# Looks like splitting up the conditions does a few things
# 1. cond2 (exploding the balloon) have 2 effects, it continues the views from
#    all conditions to some extent and also (because it occurs so rarely- see
#    first plot), that also takes in the shifting of the brain on the outside
# 2. cond3's beta is centered around 20 (so it may not be able to pick enought
#    up)

# Condition 2 (pop): predicted HR time course and beta brain map.
plt.plot(X_np[:, 2])
plt.title("Condition 2 (pop) time predictions")
plt.xlabel("Time")
plt.ylabel("Hemoglobin response")
plt.savefig(location_of_images + 'cond2_time.png')
plt.close()

plt.imshow(present_3d(B_np[..., 2]), interpolation='nearest', cmap='seismic')
plt.title("Condition 2 (pop) beta Brain Image")
plt.colorbar()
# Symmetric color limits so zero maps to the middle of the seismic colormap.
zero_out = max(abs(np.min(present_3d(B_np[..., 2]))), np.max(present_3d(B_np[..., 2])))
plt.clim(-zero_out, zero_out)
plt.savefig(location_of_images + 'mr_cond2_beta_brain.png')
plt.close()

# Condition 3 (save): predicted HR time course and beta brain map.
plt.plot(X_np[:, 3])
plt.title("Condition 3 (save) time predictions")
plt.xlabel("Time")
plt.ylabel("Hemoglobin response")
plt.savefig(location_of_images + 'mr_cond3_time.png')
plt.close()

plt.imshow(present_3d(B_np[..., 3]), interpolation='nearest', cmap='seismic')
zero_out = max(abs(np.min(present_3d(B_np[..., 3]))), np.max(present_3d(B_np[..., 3])))
plt.clim(-zero_out, zero_out)
plt.title("Condition 3 (save) beta Brain Image")
plt.colorbar()
plt.savefig(location_of_images + 'mr_cond3_beta_brain.png')
plt.close()

# Condition 1: predicted HR time course and beta brain map (no clim here).
plt.plot(X_np[:, 1])
plt.title("Condition 1 time predictions")
plt.xlabel("Time")
plt.ylabel("Hemoglobin response")
plt.savefig(location_of_images + 'mr_cond1_time.png')
plt.close()

plt.imshow(present_3d(B_np[..., 1]), interpolation='nearest', cmap='seismic')
plt.title("Condition 1 beta Brain Image")
plt.colorbar()
plt.savefig(location_of_images + 'mr_cond1_beta_brain.png')
plt.close()

# Difference of the condition 1 and 2 beta maps.
difference_12 = present_3d(B_np[..., 1]) - present_3d(B_np[..., 2])
plt.imshow(difference_12, interpolation='nearest', cmap='seismic')
plt.title("Differences between Condition 1 and 2")
zero_out = max(abs(np.min(difference_12)), np.max(difference_12))
plt.clim(-zero_out, zero_out)
plt.colorbar()
plt.savefig(location_of_images + 'all_cond_time.png' if False else location_of_images + 'mr_cond1-cond2_beta_brain.png')
plt.close()

# All three condition regressors on one plot, vertically offset for clarity.
plt.plot(X_np[:, 1] + X_np[:, 2] + X_np[:, 3], label="All Conditions", color="#000019")
plt.plot([0, 239], [0, 0])
colors = ["#000099", "#1A1AFF", "#9999FF"]
for i in range(3):
    plt.plot(X_np[:, (i + 1)] - 2 * (i + 1), label="Condition " + str(i + 1), color=colors[i])
    plt.plot([0, 239], [-2 * (i + 1), -2 * (i + 1)], color="#FF0000")
plt.legend(loc='center right', shadow=True, fontsize="smaller")
plt.title("Hemogoblin predicted response for different conditions")
plt.xlabel("Time")
plt.ylabel("Hemoglobin response")
plt.savefig(location_of_images + 'all_cond_time.png')
plt.close()

# Goodness of fit for the convolution_specialized design, with a fitted vs
# actual comparison at a single voxel.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
print("MRSS using multiple regression: " + str(np.mean(MRSS_my)))
plt.plot(data[41, 47, 2], label="actual HR response")
plt.plot(fitted_my[41, 47, 2], label="predicted HR response")
plt.title("Subject 001, voxel (41,47,2) HR Fitted vs actual")
plt.legend(loc='upper left', shadow=True, fontsize="smaller")
plt.savefig(location_of_images + "fitted_vs_actual_mult_regression.png")
plt.close()
| bsd-3-clause |
rhyolight/nupic.research | projects/wavelet_dataAggregation/runDataAggregationExperiment.py | 11 | 21206 | from os.path import isfile, join, exists
import pandas as pd
import numpy as np
from scipy import signal
import numpy.matlib
import csv
import os
import time
# Force GMT so timestamp parsing/formatting is timezone-independent.
os.environ['TZ'] = 'GMT'
time.tzset()

# Toggle plotting; matplotlib is only imported when displaying.
display = True
if display:
    import matplotlib.pyplot as plt
    plt.close('all')
    plt.ion()
def plotWaveletPower(sig, cwtmatr, time_scale, x_range=None, title=''):
    """
    Display wavelet transformations along with the original data.

    :param sig: original signal
    :param cwtmatr: cwt coefficients (scales x time)
    :param time_scale: time scale (seconds) of each wavelet row
    :param x_range: time-index range of the plot (defaults to everything)
    :param title: title of the plot
    """
    if x_range is None:
        x_range = range(0, cwtmatr.shape[1])

    fig, ax = plt.subplots(nrows=2, ncols=1)

    # Human-readable y-axis ticks at common time scales.
    y_time_scale_tick = ['1-sec', '1mins', '5mins', '30mins', '60mins', '2hrs', '4hrs', '12hrs', '1day', '1week']
    y_time_scale = [1, 60, 300, 1800, 3600, 7200, 14400, 43200, 86400, 604800]

    # Map the tick scales to row positions, assuming the rows are spaced
    # logarithmically between time_scale[0] and time_scale[-1].
    y_tick = (np.log10(y_time_scale) - np.log10(time_scale[0]) ) / \
             (np.log10(time_scale[-1]) - np.log10(time_scale[0])) * (len(time_scale)-1)
    # Keep only the ticks that fall inside the plotted scale range.
    good_tick = np.where(np.logical_and(y_tick >= 0, y_tick < len(time_scale)))[0]
    y_tick = y_tick[good_tick]
    y_time_scale_tick = [y_time_scale_tick[i] for i in good_tick]

    # Top: wavelet power matrix; bottom: the raw signal.
    ax[0].imshow(np.abs(cwtmatr[:, x_range]), aspect='auto')
    ax[0].set_yticks(y_tick)
    ax[0].set_yticklabels(y_time_scale_tick)
    ax[0].set_xlabel(' Time ')
    ax[0].set_title(title)

    ax[1].plot(sig[x_range])
    ax[1].set_xlabel(' Time ')
    ax[1].autoscale(tight=True)
    plt.show()
def calculate_cwt(sampling_interval, sig, figDir='./', fileName='./', display=True):
    """
    Calculate the continuous wavelet transformation (CWT) of a signal.

    Returns the variance of the CWT coefficients over time per scale and
    its cumulative distribution, along with the time scales themselves.

    :param sampling_interval: sampling interval of the time series (seconds)
    :param sig: values of the time series
    :param figDir: directory for the diagnostic cwt plots
    :param fileName: dataset name, used in the plot file names and titles
    :param display: whether to create the cwt plots
    :returns: (cum_cwt_var, cwt_var, time_scale)
    """
    t = np.array(range(len(sig)))*sampling_interval
    # Logarithmically spaced wavelet widths, up to 1/20 of the signal length.
    widths = np.logspace(0, np.log10(len(sig)/20), 50)
    T = int(widths[-1])

    # continuous wavelet transformation with the Ricker (Mexican-hat) wavelet
    cwtmatr = signal.cwt(sig, signal.ricker, widths)
    # Trim 4*T samples from both ends to avoid boundary artifacts.
    cwtmatr = cwtmatr[:, 4*T:-4*T]
    sig = sig[4*T:-4*T]
    t = t[4*T:-4*T]

    # Factor of 4 converts Ricker widths to approximate periods
    # — NOTE(review): confirm this calibration.
    freq = 1/widths.astype('float') / sampling_interval / 4
    time_scale = widths * sampling_interval * 4

    # Variance of wavelet power per time scale, normalized to sum to 1.
    cwt_var = np.var(np.abs(cwtmatr), axis=1)
    cwt_var = cwt_var/np.sum(cwt_var)
    cum_cwt_var = np.cumsum(cwt_var)

    (useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)

    # NOTE(review): the figure directory is created even when display=False.
    if not exists(figDir):
        os.makedirs(figDir)

    if display:
        # plot wavelet coefficients along with the raw signal
        plt.close('all')
        plotWaveletPower(sig, cwtmatr, time_scale)
        plt.savefig(join(figDir, fileName + 'wavelet_transform.pdf'))

        fig, axs = plt.subplots(nrows=2, ncols=1)
        ax = axs[0]
        ax.plot(time_scale, cwt_var, '-o')
        # Reference lines at one day and one week.
        ax.axvline(x=86400, color='c')
        ax.axvline(x=604800, color='c')
        for _ in xrange(len(local_max)):
            ax.axvline(x=time_scale[local_max[_]], color='r')
        for _ in xrange(len(strong_local_max)):
            ax.axvline(x=time_scale[strong_local_max[_]], color='k')
        for _ in xrange(len(local_min)):
            ax.axvline(x=time_scale[local_min[_]], color='b')
        ax.set_xscale('log')
        ax.set_xlabel(' Time Scale (sec) ')
        ax.set_ylabel(' Variance of Power')
        ax.autoscale(tight='True')
        ax.set_title(fileName)

        ax = axs[1]
        ax.plot(time_scale, cum_cwt_var, '-o')
        ax.set_xscale('log')
        ax.set_xlabel(' Time Scale (sec) ')
        ax.set_ylabel(' Accumulated Variance of Power')
        ax.autoscale(tight='True')
        plt.title(['useTimeOfDay: '+str(useTimeOfDay)+' useDayOfWeek: '+str(useDayOfWeek)])
        plt.savefig(join(figDir, fileName + 'aggregation_time_scale.pdf'))

    return cum_cwt_var, cwt_var, time_scale
def get_local_maxima(cwt_var, time_scale):
    """
    Find local maxima of the wavelet-variance spectrum and decide whether
    time-of-day / day-of-week encodings are warranted.

    A local maximum is "strong" when it is
    (1) at least 10% higher than the nearest local minimum, and
    (2) above the uniform baseline 1/len(cwt_var).

    :param cwt_var: 1-D array of wavelet-power variance per time scale
    :param time_scale: 1-D array of increasing time scales (seconds)
    :returns: (useTimeOfDay, useDayOfWeek, local_min, local_max,
               strong_local_max)
    """
    # Peak & valley detection via sign changes of the first difference.
    local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1
    local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1

    baseline_value = 1.0/len(cwt_var)
    dayPeriod = 86400.0
    weekPeriod = 604800.0
    cwt_var_at_dayPeriod = np.interp(dayPeriod, time_scale, cwt_var)
    cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var)

    useTimeOfDay = False
    useDayOfWeek = False
    strong_local_max = []
    for i in range(len(local_max)):  # range: Python 2/3 compatible (was xrange)
        # Nearest local minimum on the left (fall back to the first sample).
        left_local_min = np.where(np.less(local_min, local_max[i]))[0]
        if len(left_local_min) == 0:
            left_local_min = 0
            left_local_min_value = cwt_var[0]
        else:
            left_local_min = local_min[left_local_min[-1]]
            left_local_min_value = cwt_var[left_local_min]

        # Nearest local minimum on the right (fall back to the last sample).
        right_local_min = np.where(np.greater(local_min, local_max[i]))[0]
        if len(right_local_min) == 0:
            right_local_min = len(cwt_var)-1
            right_local_min_value = cwt_var[-1]
        else:
            right_local_min = local_min[right_local_min[0]]
            right_local_min_value = cwt_var[right_local_min]

        local_max_value = cwt_var[local_max[i]]
        # BUG FIX: np.max(a, b) treats the second argument as *axis*; the
        # intent is the larger of the two neighbouring minima.
        nearest_local_min_value = max(left_local_min_value, right_local_min_value)

        if ((local_max_value - nearest_local_min_value)/nearest_local_min_value > 0.1 and
                local_max_value > baseline_value):
            strong_local_max.append(local_max[i])

            # A strong peak straddling the daily period, with sufficient
            # power at exactly one day, suggests a time-of-day encoding.
            if (time_scale[left_local_min] < dayPeriod and
                    dayPeriod < time_scale[right_local_min] and
                    cwt_var_at_dayPeriod > local_max_value/2.0):
                useTimeOfDay = True

            # Likewise for the weekly period and a day-of-week encoding.
            if (time_scale[left_local_min] < weekPeriod and
                    weekPeriod < time_scale[right_local_min] and
                    cwt_var_at_weekPeriod > local_max_value/2.0):
                useDayOfWeek = True

    return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max
def get_suggested_timescale_and_encoder(timestamp, sig, thresh=0.2):
    """
    Suggest an aggregation interval and datetime-encoder flags for a series.

    The data is resampled to its median sampling interval, the wavelet
    variance spectrum is computed, and the aggregation time scale is taken
    as 1/10 of the scale where the cumulative variance reaches ``thresh``
    (but never below 4 raw samples).

    :param timestamp: numpy datetime64 array
    :param sig: values of the time series
    :param thresh: cumulative-variance cutoff in [0, 1)
    :returns: (new_sampling_interval, useTimeOfDay, useDayOfWeek) where
        new_sampling_interval is a pandas-style offset string like '30S'
    """
    dt = np.median(np.diff(timestamp))
    dt_sec = dt.astype('float32')
    # resample the data with homogeneous sampling intervals
    (timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
    (cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig)
    (useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
    cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
    aggregation_time_scale = cutoff_time_scale/10.0
    if aggregation_time_scale < dt_sec*4:
        aggregation_time_scale = dt_sec*4
    # Sample four points per aggregation window.
    new_sampling_interval = str(int(aggregation_time_scale/4))+'S'
    return (new_sampling_interval, useTimeOfDay, useDayOfWeek)
def readCSVfiles(fileName):
    """
    Read a two-column CSV data file with header "timestamp", "value".

    :param fileName: path of the CSV file
    :returns: (timestamps, values) as a numpy datetime64 array and a
        float32 array

    The file handle is closed deterministically (it previously leaked),
    and the header is skipped with the builtin next(), which works on
    both Python 2 and 3 (reader.next() is Python-2 only).
    """
    timestamps = []
    values = []
    with open(fileName, 'r') as f:
        fileReader = csv.reader(f)
        next(fileReader)  # skip header line
        for row in fileReader:
            timestamps.append(row[0])
            values.append(row[1])
    timestamps = np.array(timestamps, dtype='datetime64')
    values = np.array(values, dtype='float32')
    return (timestamps, values)
def writeCSVfiles(fileName, timestamp, value):
    """
    Write a two-column CSV file with header "timestamp", "value".

    :param fileName: output path
    :param timestamp: array of numpy datetime64 (second resolution)
    :param value: array of numbers, same length as timestamp

    The file is opened in a ``with`` block so the handle is flushed and
    closed even on error (it previously leaked), and the index loop uses
    range, which works on both Python 2 and 3 (xrange is Python-2 only).
    """
    with open(fileName, 'w') as f:
        fileWriter = csv.writer(f)
        fileWriter.writerow(['timestamp', 'value'])
        for i in range(len(timestamp)):
            # datetime64 -> datetime.datetime -> "YYYY-mm-dd HH:MM:SS"
            fileWriter.writerow(
                [timestamp[i].astype('O').strftime("%Y-%m-%d %H:%M:%S"),
                 value[i]])
def resample_data(timestamp, sig, new_sampling_interval, display=False):
    """
    Resample a time series at a new regular interval via linear interpolation.

    Note: interpolation is not an aggregation; it may be inappropriate when
    downsampling for aggregation purposes.

    :param timestamp: numpy datetime64 array
    :param sig: values of the time series
    :param new_sampling_interval: new sampling interval (numpy timedelta64)
    :returns: (timestamp_new, sig_new) — regularly spaced timestamps
        (datetime64[s]) and the interpolated signal
    """
    nSampleNew = np.floor(
        (timestamp[-1] - timestamp[0]) / new_sampling_interval).astype('int') + 1

    # Vectorized replacement for the original per-sample loop (which also
    # used the Python-2-only xrange); the cast to [s] matches the original
    # element-wise assignment into a datetime64[s] array.
    timestamp_new = (timestamp[0] +
                     np.arange(nSampleNew) * new_sampling_interval).astype('datetime64[s]')

    sig_new = np.interp((timestamp_new - timestamp[0]).astype('float32'),
                        (timestamp - timestamp[0]).astype('float32'), sig)

    if display:
        plt.figure(3)
        plt.plot(timestamp, sig)
        plt.plot(timestamp_new, sig_new)
        plt.legend(['before resampling', 'after resampling'])

    return (timestamp_new, sig_new)
def aggregate_data(thresh_list, dataFile, aggregatedDataPath, waveletDir='./wavelet/', display=False, verbose=0):
    """
    Aggregate one dataset at every threshold; results are written under
    aggregatedDataPath/thresh=<t>/<dir>/<file>.

    For each threshold the aggregation window is 1/10 of the time scale
    where the cumulative wavelet variance reaches the threshold (but never
    below 4 raw samples); values inside each window are averaged.

    :param thresh_list: list of aggregation thresholds
    :param dataFile: path of the original data file
    :param aggregatedDataPath: root directory for the aggregated data
    :param waveletDir: directory for wavelet plots (visual inspection)
    :param display: forwarded to calculate_cwt for plotting
    :param verbose: > 0 prints per-threshold length statistics
    """
    data_file_dir = dataFile.split('/')
    (timestamp, sig) = readCSVfiles(dataFile)

    # Median sampling interval of the raw series.
    dt = np.median(np.diff(timestamp))
    dt_sec = dt.astype('float32')

    # Resample onto a homogeneous grid first.
    # NOTE(review): resample_data is called with display=True regardless of
    # this function's own display flag.
    (timestamp, sig) = resample_data(timestamp, sig, dt, display=True)

    (cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig,
                                                       display=display,
                                                       figDir=join(waveletDir, data_file_dir[-2]),
                                                       fileName=data_file_dir[-1])

    for thresh in thresh_list:
        new_data_dir = join(aggregatedDataPath, 'thresh='+str(thresh), data_file_dir[-2])
        if not exists(new_data_dir):
            os.makedirs(new_data_dir)
        new_data_file = join(new_data_dir, data_file_dir[-1])

        # Determine the aggregation time scale for this threshold.
        cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
        aggregation_time_scale = cutoff_time_scale/10.0
        if aggregation_time_scale < dt_sec*4:
            aggregation_time_scale = dt_sec*4

        # Four output samples per aggregation window, in milliseconds.
        new_sampling_interval = np.timedelta64(int(aggregation_time_scale/4 * 1000), 'ms')
        nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
        timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
        value_new = np.empty(nSampleNew, dtype='float32')

        # Average the raw samples that fall inside each aggregation bin.
        left_sampleI = 0
        new_sampleI = 0
        for sampleI in xrange(len(sig)):
            if timestamp[sampleI] >= timestamp[0] + new_sampleI * new_sampling_interval:
                timestamp_new[new_sampleI] = timestamp[0] + new_sampleI * new_sampling_interval
                value_new[new_sampleI] = (np.mean(sig[left_sampleI:sampleI+1]))
                left_sampleI = sampleI+1
                new_sampleI += 1

        writeCSVfiles(new_data_file, timestamp_new, value_new)

        if verbose > 0:
            print " original length: ", len(sig), "\t file: ", dataFile
            print "\t\tthreshold: ", thresh, "\t new length: ", len(value_new)
def aggregate_nab_data(thresh_list, dataPath='data/',
                       aggregatedDataPath='data_aggregate/',
                       waveletDir='wavelet/',
                       verbose=0):
    """
    Aggregate every NAB data file with the wavelet-based algorithm.

    :param thresh_list: aggregation thresholds, each in [0, 1)
    :param dataPath: directory of the original NAB data
    :param aggregatedDataPath: output directory for the aggregated data
    :param waveletDir: output directory for the wavelet plots
    """
    if not exists(aggregatedDataPath):
        os.makedirs(aggregatedDataPath)
    # One sub-directory per NAB data category.
    subdirs = [join(dataPath, name) for name in os.listdir(dataPath)
               if not isfile(join(dataPath, name))]
    for subdir in subdirs:
        for fname in os.listdir(subdir):
            fpath = join(subdir, fname)
            if isfile(fpath):
                aggregate_data(thresh_list, fpath, aggregatedDataPath,
                               waveletDir, verbose=verbose)
def get_pre_aggregated_anomaly_score(data_path, result_folder, result_folder_pre_aggregate):
    """
    Map anomaly scores computed on aggregated data back to the original
    sampling rate.

    For every result file in result_folder, each detection
    (anomaly_score > 0.5) is assigned to the nearest-in-time record of the
    corresponding original file in data_path; all other records get a zero
    score.  The remapped results are written under
    result_folder_pre_aggregate with the same relative layout.
    """
    dataDirs = [join(result_folder, f) for f in os.listdir(result_folder) if not isfile(join(result_folder, f))]
    for dir in dataDirs:
        resultfiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
        for i in range(len(resultfiles)):
            result_file_dir = resultfiles[i].split('/')
            # Strip the 8-character detector prefix (presumably "numenta_")
            # to locate the original data file — verify against the runner.
            original_data_file = join(data_path, result_file_dir[-2], result_file_dir[-1][8:])
            dat = pd.read_csv(original_data_file, header=0, names=['timestamp', 'value'])
            result = pd.read_csv(resultfiles[i], header=0,
                                 names=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
            time_stamp_pre_aggregation = pd.to_datetime(dat.timestamp)
            time_stamp_after_aggregation = pd.to_datetime(result.timestamp)
            binary_anomaly_score_pre_aggregation = np.zeros(shape=(len(dat),))
            binary_anomaly_score_after_aggregation = np.zeros(shape=(len(result),))
            for j in range(len(result)):
                if result.anomaly_score[j] > .5:
                    binary_anomaly_score_after_aggregation[j] = 1
                    # The nearest original record in time inherits the detection.
                    idx_original = np.argmin(abs(time_stamp_pre_aggregation - time_stamp_after_aggregation[j]))
                    binary_anomaly_score_pre_aggregation[idx_original] = 1
            value_pre_aggregation = dat.value.values
            # Raw scores and labels are not remapped; they are zero-filled.
            raw_score_pre_aggregation = np.zeros(shape=(len(dat),))
            label_pre_aggregation = np.zeros(shape=(len(dat),))
            result_pre_aggregate = pd.DataFrame(np.transpose(np.array([time_stamp_pre_aggregation,
                                                                       value_pre_aggregation,
                                                                       binary_anomaly_score_pre_aggregation,
                                                                       raw_score_pre_aggregation,
                                                                       label_pre_aggregation])),
                                                columns=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
            result_file_dir_pre_aggregate = join(result_folder_pre_aggregate, result_file_dir[-2])
            if not exists(result_file_dir_pre_aggregate):
                os.makedirs(result_file_dir_pre_aggregate)
            result_file_pre_aggregate = join(result_file_dir_pre_aggregate, result_file_dir[-1])
            result_pre_aggregate.to_csv(result_file_pre_aggregate, index=False)
            print " write pre-aggregated file to ", result_file_pre_aggregate
            # compare anomaly scores before and after aggregations for individual files
            # plt.figure(2)
            # plt.plot(time_stamp_after_aggregation, binary_anomaly_score_after_aggregation)
            # plt.plot(time_stamp_pre_aggregation, binary_anomaly_score_pre_aggregation)
def runTimeVsDataLength(dataPath):
    """
    Plot Data Aggregation Algorithm Runtime vs length of the data

    :param dataPath: directory containing one sub-directory per data
        category; every CSV file inside the sub-directories is aggregated
        once and timed.
    :returns: tuple (dataLength, runTime) of parallel lists, one entry per
        processed file.  Also saves the scatter plot to
        RuntimeVsDatasetSize.pdf in the working directory.
    """
    # Keep only the sub-directories of dataPath (files at the top level are
    # ignored).
    dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
    thresh = 0.2  # single aggregation threshold used for every timing run
    dataLength = []
    runTime = []
    for dir in dataDirs:
        datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
        for i in range(len(datafiles)):
            (timestamp, sig) = readCSVfiles(datafiles[i])
            dataLength.append(len(sig))
            # Time one aggregation pass over this file.
            start_time = time.time()
            aggregate_data([thresh], datafiles[i], aggregatedDataPath='data_aggregate/', display=False)
            end_time = time.time()
            print " length: ", len(sig), " file: ", datafiles[i], " Time: ", (end_time - start_time)
            runTime.append(end_time - start_time)
    # Scatter plot of runtime versus dataset size.
    plt.figure()
    plt.plot(dataLength, runTime, '*')
    plt.xlabel(' Dataset Size (# Record)')
    plt.ylabel(' Runtime (seconds) ')
    plt.savefig('RuntimeVsDatasetSize.pdf')
    return (dataLength, runTime)
if __name__ == "__main__":
NABPath = '/Users/ycui/nta/NAB/'
currentPath = os.getcwd()
thresh_list = [0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.40]
# step 1: aggregate NAB data with different threshold
print " aggregating NAB data ..."
aggregate_nab_data(thresh_list, dataPath=NABPath+'data/', verbose=2)
# step 2: run HTM on aggregated NAB data
for thresh in thresh_list:
resultsAggregatePath = currentPath + "/results_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(resultsAggregatePath):
os.os.makedirs(resultsAggregatePath)
print " run HTM on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --detect --dataDir " + currentPath + "/data_aggregate/thresh=" + str(thresh) + \
"/ --resultsDir "+ currentPath + "/results_aggregate/thresh=" + str(thresh) + " --skipConfirmation")
# step 3: get pre-aggregated anomaly score
for thresh in thresh_list:
preresultAggregatePath = currentPath + "/results_pre_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(preresultAggregatePath):
os.os.makedirs(preresultAggregatePath)
get_pre_aggregated_anomaly_score(data_path=NABPath+'data/',
result_folder='results_aggregate/thresh=' + str(thresh) + '/numenta',
result_folder_pre_aggregate='results_pre_aggregate/thresh=' + str(thresh) + '/numenta')
# step 4: run NAB scoring
for thresh in thresh_list:
print " run scoring on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --score --skipConfirmation " +
"--thresholdsFile " + NABPath + "config/thresholds.json " +
"--resultsDir " + currentPath + "/results_pre_aggregate/thresh="+str(thresh)+"/")
# step 5: read & compare scores
standard_score = []
data_length_all = []
for thresh in thresh_list:
scorefile = "./results_pre_aggregate/thresh=" + str(thresh) + "/numenta/numenta_standard_scores.csv"
scoredf = pd.read_csv(scorefile, header=0)
scoredf = scoredf.sort('File')
scoredf.index = range(len(scoredf))
standard_score.append(scoredf.Score.values[:-1])
data_length = []
for i in xrange(len(scoredf.File)-1):
datafile = './data_aggregate/thresh=' + str(thresh) + '/' + scoredf.File[i]
dat = pd.read_csv(datafile, header=0, names=['timestamp', 'value'])
data_length.append(len(dat))
data_length_all.append(data_length)
data_length_all = np.array(data_length_all)
standard_score = np.array(standard_score)
short_dat = np.where(data_length_all[0, :] < 1000)[0]
long_dat = np.where(data_length_all[0, :] > 1000)[0]
use_dat = np.array(range(data_length_all.shape[1]))
use_dat = long_dat
# plt.imshow(data_length_all, interpolation='nearest', aspect='auto')
# plot anomaly score vs aggregation threshold
anomaly_score_diff = standard_score[:, long_dat] - numpy.matlib.repmat(standard_score[0, long_dat], len(thresh_list), 1)
shortFileName = []
for i in range(len(scoredf.File.values[:-1])):
file = scoredf.File.values[i]
fileName = file.split('/')[-1]
fileName = fileName[:-4]
shortFileName.append(fileName)
fig=plt.figure()
plt.imshow(anomaly_score_diff, interpolation='nearest', aspect='auto')
ytickLoc = range(len(thresh_list))
plt.yticks(ytickLoc, thresh_list)
plt.xticks(range(len(scoredf.File)-1), shortFileName, rotation='vertical')
plt.subplots_adjust(bottom=0.6)
plt.ylabel(' Threshold')
plt.title(' Anomaly Score Relative to BaseLine')
plt.colorbar()
plt.clim(-2, 2)
plt.savefig('AnomalyScore_Vs_AggregationThreshold_EachFile.pdf')
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(np.array(thresh_list)*100, np.median(standard_score[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(standard_score[:, use_dat], 1), '-o')
plt.legend(['Median', 'Mean'])
plt.xlabel(' Threshold (%)')
plt.ylabel(' Median Anomaly Score ')
plt.subplot(2, 1, 2)
plt.plot(np.array(thresh_list)*100, np.median(data_length_all[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(data_length_all[:, use_dat], 1), '-o')
plt.xlabel(' Threshold (%)')
plt.ylabel(' Data Length ')
plt.legend(['Median', 'Mean'])
plt.savefig('AnomalyScore_Vs_AggregationThreshold.pdf')
num_better_anomaly_score = []
for i in xrange(len(thresh_list)-1):
num_better_anomaly_score.append(len(np.where(standard_score[i+1, :] > standard_score[0, :])[0]))
(dataLength, runTime) = runTimeVsDataLength(dataPath=NABPath+'data/')
| gpl-3.0 |
crscardellino/dnnwsd | dnnwsd/pipeline/semisupervised.py | 1 | 5597 | # -*- coding: utf-8 -*-
import logging
import os
from copy import deepcopy
from sklearn import linear_model, tree
from ..corpus import sensem, semeval, unannotated
from ..experiment import results, semisupervised
from ..model import mlp
from ..processor import bowprocessor, vecprocessor
from ..utils.setup_logging import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
class SemiSupervisedPipeline(object):
    """Run the semi-supervised WSD experiment set over every lemma of a corpus.

    For each lemma the pipeline instantiates a feature processor and a model
    per configured experiment, runs a self-training experiment, and saves the
    results under a per-lemma directory.
    """

    # Config key -> annotated-corpus directory iterator class.
    corpus_iterators_map = {
        'sensem': sensem.SenSemCorpusDirectoryIterator,
        'semeval': semeval.SemevalCorpusDirectoryIterator
    }

    # Config key -> feature processor class.
    processors_map = {
        'bow': bowprocessor.SemiSupervisedBoWProcessor,
        'wordvec': vecprocessor.SemiSupervisedWordVectorsProcessor,
        'wordvecpos': vecprocessor.SemiSupervisedWordVectorsPoSProcessor
    }

    # Config key -> classifier class.
    models_map = {
        'decisiontree': tree.DecisionTreeClassifier,
        'logreg': linear_model.LogisticRegression,
        'mlp': mlp.MultiLayerPerceptron
    }

    def __init__(self, corpus_directory, unannotated_corpus_directory, results_directory,
                 experiment_set, features_path, **kwargs):
        """
        :param corpus_directory: path of the annotated corpus.
        :param unannotated_corpus_directory: path of the unannotated corpus.
        :param results_directory: root directory for experiment results.
        :param experiment_set: list of 4-tuples
            (processor_key, processor_parameters, model_key, model_parameters).
        :param features_path: directory of pickled feature files (used by
            the 'bow' processor only).

        Recognized kwargs (with defaults): corpus_directory_iterator
        ('sensem'), sense_filter (3), confidence_threshold (0.99),
        minimum_instances (None), max_iterations (100), evaluation_size (10),
        starting_lemma (0).
        """
        self._corpus_iterator = self.corpus_iterators_map[kwargs.pop('corpus_directory_iterator', 'sensem')](
            corpus_directory, kwargs.pop('sense_filter', 3)
        )
        self._unannotated_corpus_iterator = unannotated.UnannotatedCorpusDirectoryIterator(
            unannotated_corpus_directory
        )
        self._results_directory = results_directory
        self._experiment_set = experiment_set
        # List of 4-tuples, each defining an experiment.
        # (processor, processor_parameters, model, model_parameters)
        self._features_path = features_path
        self._confidence_threshold = kwargs.pop("confidence_threshold", 0.99)
        self._minimum_instances = kwargs.pop("minimum_instances", None)
        self._max_iterations = kwargs.pop("max_iterations", 100)
        self._evaluation_size = kwargs.pop("evaluation_size", 10)
        self._starting_lemma = kwargs.pop("starting_lemma", 0)

    def _run_for_corpus(self, annotated_corpus, unannotated_corpus, corpus_index):
        """Run every configured experiment for one lemma.

        :param annotated_corpus: ddnwsd.corpus.sensem.SenSemCorpus
        :param unannotated_corpus: ddnwsd.corpus.unannotated.UnannotatedCorpus
        """
        lemma_index = self._corpus_iterator.lemmas.index(annotated_corpus.lemma)
        # Results go to <results_directory>/<zero-padded lemma index>/.
        experiments_dir = os.path.join(self._results_directory, "{:03d}".format(lemma_index))

        for (pkey, pparam, mkey, mparam) in self._experiment_set:
            # MLP experiments encode layer sizes and pre-train epochs in the
            # experiment directory name.
            if mkey == 'mlp':
                experiment_name = "{}_{}_{}_{}".format(
                    pkey, mkey, mparam.get('layers'), mparam.get('pre_train_epochs', 0)
                )
            else:
                experiment_name = "{}_{}".format(pkey, mkey)

            results_save_path = os.path.join(experiments_dir, experiment_name)
            os.makedirs(results_save_path)

            if pkey == 'bow':
                pparam['features_path'] = os.path.join(self._features_path, "{:03d}.p".format(corpus_index))

            # The unannotated corpus is deep-copied so each experiment works
            # on its own mutable copy.
            processor = self.processors_map[pkey](annotated_corpus, deepcopy(unannotated_corpus), **pparam)
            """:type : dnnwsd.processor.base.BaseProcessor"""
            processor.instances()

            # The MLP needs its layer dimensions derived from the processor.
            if mkey == 'mlp':
                mparam['input_dim'] = processor.features_dimension()
                mparam['output_dim'] = len(processor.labels)

            model = self.models_map[mkey](**mparam)
            """:type : dnnwsd.models.base.BaseModel"""

            logger.info(u"Running experiments for {} and model {}".format(processor.name, model.__class__.__name__))

            results_handler = results.SemiSupervisedResultsHandler(
                save_path=results_save_path, labels=processor.labels, target=processor.target
            )

            experiment_params = dict(
                confidence_threshold=self._confidence_threshold,
                max_iterations=self._max_iterations,
                evaluation_size=self._evaluation_size
            )

            if self._minimum_instances is not None:
                experiment_params['minimum_instances'] = self._minimum_instances

            experiment = semisupervised.SemiSupervisedExperiment(processor, model, **experiment_params)
            experiment.run(results_handler)

            results_handler.save_results()

    def run(self):
        """Iterate the whole annotated corpus and run experiments per lemma."""
        logger.info(u"Running semi-supervised experiments pipeline for whole corpus")

        for corpus_index, annotated_corpus in enumerate(self._corpus_iterator):
            # Allow resuming a previous run from a given lemma index.
            if corpus_index < self._starting_lemma:
                logger.info(u"Skipping experiments pipeline for lemma {}. ".format(annotated_corpus.lemma) +
                            u"The corpus has already been parsed.")
                continue

            if not annotated_corpus.has_multiple_senses() or annotated_corpus.lemma == u"estar":
                logger.info(u"Skipping experiments pipeline for lemma {}. ".format(annotated_corpus.lemma) +
                            u"The corpus doesn't have enough senses")
                continue

            unannotated_corpus = self._unannotated_corpus_iterator[annotated_corpus.lemma]

            logger.info(u"Running experiments pipeline for lemma {} with index {}"
                        .format(annotated_corpus.lemma, corpus_index))

            self._run_for_corpus(annotated_corpus, unannotated_corpus, corpus_index)

            logger.info(u"Finished experiments pipeline for lemma {}".format(annotated_corpus.lemma))
| bsd-3-clause |
tskisner/pytoast | src/python/tests/ops_dipole.py | 1 | 7665 | # Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
from ..mpi import MPI
from .mpi import MPITestCase
import sys
import os
import numpy as np
import numpy.testing as nt
import healpy as hp
from ..tod.tod import *
from ..tod.pointing import *
from ..tod.noise import *
from ..tod.sim_noise import *
from ..tod.sim_det_noise import *
from ..tod.sim_det_dipole import *
from ..tod.sim_tod import *
from ..tod.tod_math import *
from ..map import *
from ._helpers import (create_outdir, create_distdata, boresight_focalplane,
uniform_chunks)
class OpSimDipoleTest(MPITestCase):
    """MPI test case for the solar-system dipole simulation operator."""

    def setUp(self):
        # Per-test output directory named after this module.
        fixture_name = os.path.splitext(os.path.basename(__file__))[0]
        self.outdir = create_outdir(self.comm, fixture_name)

        # Create one observation per group, and each observation will have
        # one detector per process and a single chunk.
        self.data = create_distdata(self.comm, obs_per_group=1)

        self.ndet = self.data.comm.group_size
        self.rate = 20.0

        # Create detectors with default properties
        dnames, dquat, depsilon, drate, dnet, dfmin, dfknee, dalpha = \
            boresight_focalplane(self.ndet, samplerate=self.rate)

        # Pixelization
        self.nside = 64
        self.npix = 12 * self.nside**2
        self.subnside = 16
        if self.subnside > self.nside:
            self.subnside = self.nside
        self.subnpix = 12 * self.subnside * self.subnside

        # Samples per observation
        self.totsamp = self.npix

        # Dipole parameters: solar-system velocity vector built from its
        # direction in galactic coordinates.
        self.solar_speed = 369.0
        gal_theta = np.deg2rad(90.0 - 48.05)
        gal_phi = np.deg2rad(264.31)
        z = self.solar_speed * np.cos(gal_theta)
        x = self.solar_speed * np.sin(gal_theta) * np.cos(gal_phi)
        y = self.solar_speed * np.sin(gal_theta) * np.sin(gal_phi)
        self.solar_vel = np.array([x, y, z])
        self.solar_quat = qa.from_vectors(np.array([0.0, 0.0, 1.0]), self.solar_vel)

        # Expected dipole amplitude and the sky pixels of its extrema
        # (maximum along the velocity direction, minimum opposite it).
        self.dip_check = 0.00335673
        self.dip_max_pix = hp.ang2pix(self.nside, gal_theta, gal_phi, nest=False)
        self.dip_min_pix = hp.ang2pix(self.nside, (np.pi - gal_theta), (np.pi + gal_phi), nest=False)

        # Populate the observations
        tod = TODHpixSpiral(
            self.data.comm.comm_group,
            dquat,
            self.totsamp,
            detranks=self.data.comm.comm_group.size,
            firsttime=0.0,
            rate=self.rate,
            nside=self.nside)

        self.data.obs[0]["tod"] = tod

    def tearDown(self):
        del self.data

    def test_dipole_func(self):
        # Verify that we get the right magnitude if we are pointed at the
        # velocity maximum.
        dtod = dipole(self.solar_quat.reshape((1, 4)), solar=self.solar_vel)
        nt.assert_almost_equal(dtod, self.dip_check * np.ones_like(dtod))
        return

    def test_dipole_func_total(self):
        # Verify that we get the right magnitude if we are pointed at the
        # velocity maximum.
        quat = np.array(
            [[ 0.5213338 , 0.47771442,-0.5213338 , 0.47771442],
             [ 0.52143458, 0.47770023,-0.52123302, 0.4777286 ],
             [ 0.52153535, 0.47768602,-0.52113222, 0.47774277],
             [ 0.52163611, 0.4776718 ,-0.52103142, 0.47775692],
             [ 0.52173686, 0.47765757,-0.52093061, 0.47777106]])
        v_sat = np.array(
            [[ 1.82378638e-15, 2.97846918e+01, 0.00000000e+00],
             [ -1.48252084e-07, 2.97846918e+01, 0.00000000e+00],
             [ -2.96504176e-07, 2.97846918e+01, 0.00000000e+00],
             [ -4.44756262e-07, 2.97846918e+01, 0.00000000e+00],
             [ -5.93008348e-07, 2.97846918e+01, 0.00000000e+00]])
        v_sol = np.array([ -25.7213418, -244.31203375, 275.33805175])
        dtod = dipole(quat, vel=v_sat, solar=v_sol, cmb=2.725)
        # computed with github.com/zonca/dipole
        expected = np.array([0.00196249, 0.00196203, 0.00196157, 0.00196111, 0.00196065])
        nt.assert_allclose(dtod, expected, rtol=1e-5)
        return

    def test_sim(self):
        # make a simple pointing matrix
        pointing = OpPointingHpix(nside=self.nside, nest=False, mode='I')
        pointing.exec(self.data)

        # generate timestreams
        op = OpSimDipole(mode='solar', coord='G')
        op.exec(self.data)

        # make a binned map

        # get locally hit pixels
        lc = OpLocalPixels()
        localpix = lc.exec(self.data)

        # find the locally hit submaps.
        localsm = np.unique(np.floor_divide(localpix, self.subnpix))

        # construct distributed maps to store the covariance,
        # noise weighted map, and hits
        invnpp = DistPixels(comm=self.data.comm.comm_world, size=self.npix,
            nnz=1, dtype=np.float64, submap=self.subnpix, local=localsm,
            nest=False)
        invnpp.data.fill(0.0)

        zmap = DistPixels(comm=self.data.comm.comm_world, size=self.npix,
            nnz=1, dtype=np.float64, submap=self.subnpix, local=localsm,
            nest=False)
        zmap.data.fill(0.0)

        hits = DistPixels(comm=self.data.comm.comm_world, size=self.npix,
            nnz=1, dtype=np.int64, submap=self.subnpix, local=localsm,
            nest=False)
        hits.data.fill(0)

        # accumulate the inverse covariance and noise weighted map.
        # Use detector weights based on the analytic NET.
        tod = self.data.obs[0]['tod']

        detweights = {}
        for d in tod.local_dets:
            detweights[d] = 1.0

        build_invnpp = OpAccumDiag(detweights=detweights, invnpp=invnpp,
            hits=hits, zmap=zmap, name="dipole")
        build_invnpp.exec(self.data)

        invnpp.allreduce()
        hits.allreduce()
        zmap.allreduce()

        hits.write_healpix_fits(os.path.join(self.outdir, "hits.fits"))
        invnpp.write_healpix_fits(os.path.join(self.outdir, "invnpp.fits"))
        zmap.write_healpix_fits(os.path.join(self.outdir, "zmap.fits"))

        # invert it
        covariance_invert(invnpp, 1.0e-3)

        invnpp.write_healpix_fits(os.path.join(self.outdir, "npp.fits"))

        # compute the binned map, N_pp x Z
        covariance_apply(invnpp, zmap)
        zmap.write_healpix_fits(os.path.join(self.outdir, "binned.fits"))

        # Rank 0 checks the written maps against the analytic expectation.
        if self.comm.rank == 0:
            import matplotlib.pyplot as plt

            mapfile = os.path.join(self.outdir, 'hits.fits')
            data = hp.read_map(mapfile, nest=False)
            nt.assert_almost_equal(data, self.data.comm.ngroups * self.ndet * \
                np.ones_like(data))

            outfile = "{}.png".format(mapfile)
            hp.mollview(data, xsize=1600, nest=False)
            plt.savefig(outfile)
            plt.close()

            mapfile = os.path.join(self.outdir, 'binned.fits')
            data = hp.read_map(mapfile, nest=False)

            # verify that the extrema are in the correct location
            # and have the correct value.

            minmap = np.min(data)
            maxmap = np.max(data)
            nt.assert_almost_equal(maxmap, self.dip_check, decimal=5)
            nt.assert_almost_equal(minmap, -self.dip_check, decimal=5)

            minloc = np.argmin(data)
            maxloc = np.argmax(data)
            nt.assert_equal(minloc, self.dip_min_pix)
            nt.assert_equal(maxloc, self.dip_max_pix)

            outfile = "{}.png".format(mapfile)
            hp.mollview(data, xsize=1600, nest=False)
            plt.savefig(outfile)
            plt.close()

        return
| bsd-2-clause |
cybernet14/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)

import time

import matplotlib.pyplot as plt
import numpy as np


from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d

faces = datasets.fetch_olivetti_faces()

###############################################################################
# Learn the dictionary of images

print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)

# patch_buffer accumulates extracted patches until a partial_fit batch is
# ready.  (Renamed from "buffer", which shadows a builtin.)
patch_buffer = []
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times
# (BUG FIX: removed a dead "index = 1" that was immediately overwritten.)
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        patch_buffer.append(data)
        index += 1
        if index % 10 == 0:
            # Every 10 images (~500 patches): center/scale the batch and
            # update the online KMeans model.
            data = np.concatenate(patch_buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            patch_buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))

dt = time.time() - t0
print('done in %.2fs.' % dt)

###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

# BUG FIX: the title previously reported "8 * len(faces.images)" patches;
# training actually consumes 6 passes x 50 patches per image.
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * 50 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
mtb0/flightmodel | src/download/get_files.py | 1 | 2224 | #!/usr/bin/env python
"""Download Latitude/Longitude information and Flight time information from
the Bureau of Transportation Statistics website, using wget."""
import os
import pandas as pd
import tempfile
URL='http://tsdata.bts.gov/'
LATLONG='187806114_T_MASTER_CORD'
FLIGHT='On_Time_On_Time_Performance'
def get_latlong():
    """Fetch the BTS airport coordinate table and reduce it to four columns.

    Downloads the master coordinate archive, unpacks it into
    ../data/LatLong.csv, then rewrites that file keeping only the airport
    id, display name, latitude and longitude, with incomplete rows dropped.
    """
    dest = '../data/LatLong.csv'
    archive = LATLONG + '.zip'
    # Download and unpack the archive, then move the CSV into place.
    os.system('wget ' + URL + LATLONG + '.zip')
    os.system('unzip ' + archive)
    os.system('rm ' + archive)
    os.system('mv ' + LATLONG + '.csv ' + dest)
    # Select the columns of interest and overwrite the file.
    raw = pd.read_csv(dest)
    coords = pd.DataFrame({'AirportId': raw.AIRPORT_SEQ_ID,
                           'AirportName': raw.DISPLAY_AIRPORT_NAME,
                           'Latitude': raw.LATITUDE,
                           'Longitude': raw.LONGITUDE})
    coords = coords.dropna()
    coords.to_csv(dest, index=False)
def get_flights(year, month):
    """Download one month of on-time performance data and write the reduced
    table to ../data/<year>_<month>.csv.

    :param year: four-digit year to fetch
    :param month: month number (no zero padding)
    """
    flightfile=FLIGHT + '_%d_%d' %(year, month)
    flighturl=URL + 'PREZIP/' + flightfile + '.zip'
    newflightfile='../data/%d_%d.csv' %(year, month)
    #Download flight data as year_month.csv
    os.system('wget ' + flighturl)
    tempdir = tempfile.mkdtemp() #Create temporary directory to place unzipped file.
    # NOTE(review): the archive name is passed to unzip without ".zip";
    # unzip appends the extension itself when the exact name is not found —
    # verify on the target platform.
    os.system('unzip ' + flightfile + ' -d ' + tempdir)
    os.system('rm ' + flightfile + '.zip')
    os.system('mv ' + tempdir + '/' + flightfile + '.csv ' + newflightfile)
    os.system('rm ' + tempdir + '/*')
    os.removedirs(tempdir)
    #Read the flight csv, select and write columns.
    df=pd.read_csv(newflightfile, dtype={'UniqueCarrier':str})
    flightdf=pd.DataFrame({
        'Day':df.DayOfWeek,
        'Date':df.FlightDate,
        'Carrier':df.UniqueCarrier,
        'OriginAirportId':df.OriginAirportSeqID,
        'DestAirportId':df.DestAirportSeqID,
        'SchDep':df.CRSDepTime,
        'DepTime':df.DepTime,
        'DepDelay':df.DepDelay,
        'SchArr':df.CRSArrTime,
        'ArrTime':df.ArrTime,
        'ArrDelay':df.ArrDelay,
        'SchTime':df.CRSElapsedTime,
        'ActualTime':df.ActualElapsedTime,
        'Distance':df.Distance})
    flightdf.to_csv(newflightfile, index=False)
| mit |
LiaoPan/blaze | blaze/compute/tests/test_numpy_compute.py | 3 | 16537 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze import sin
from odo import into
from datashape import discover, to_numpy, dshape
# Shared fixture: a small structured array of accounts and the matching
# blaze symbol used by most tests below.
x = np.array([(1, 'Alice', 100),
              (2, 'Bob', -200),
              (3, 'Charlie', 300),
              (4, 'Denis', 400),
              (5, 'Edith', -500)],
             dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])

t = symbol('t', discover(x))
def eq(a, b):
    """Compare two values, reducing elementwise ndarray comparisons with
    ``all()`` so the result is always a single boolean."""
    comparison = a == b
    if not isinstance(comparison, np.ndarray):
        return comparison
    return comparison.all()
# Identity and simple comparison against the shared fixture.
def test_symbol():
    assert eq(compute(t, x), x)


def test_eq():
    assert eq(compute(t['amount'] == 100, x),
              x['amount'] == 100)
def test_selection():
    # BUG FIX: the expected array previously filtered on amount == 0 (an
    # empty selection) while the expression under test selects amount == 100;
    # both sides must use the same predicate.
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
    assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
# Elementwise arithmetic and unary operators on record fields.
def test_arithmetic():
    assert eq(compute(t['amount'] + t['id'], x),
              x['amount'] + x['id'])
    assert eq(compute(t['amount'] * t['id'], x),
              x['amount'] * x['id'])
    assert eq(compute(t['amount'] % t['id'], x),
              x['amount'] % x['id'])


def test_UnaryOp():
    assert eq(compute(exp(t['amount']), x),
              np.exp(x['amount']))
    assert eq(compute(abs(-t['amount']), x),
              abs(-x['amount']))


def test_Neg():
    assert eq(compute(-t['amount'], x),
              -x['amount'])


def test_invert_not():
    assert eq(compute(~(t.amount > 0), x),
              ~(x['amount'] > 0))
# Scalar reductions compared against their numpy equivalents.
def test_Reductions():
    assert compute(t['amount'].mean(), x) == x['amount'].mean()
    assert compute(t['amount'].count(), x) == len(x['amount'])
    assert compute(t['amount'].sum(), x) == x['amount'].sum()
    assert compute(t['amount'].min(), x) == x['amount'].min()
    assert compute(t['amount'].max(), x) == x['amount'].max()
    assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
    assert compute(t['amount'].var(), x) == x['amount'].var()
    assert compute(t['amount'].std(), x) == x['amount'].std()
    # unbiased variants map to ddof=1 in numpy
    assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
    assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
    assert compute((t['amount'] > 150).any(), x) == True
    assert compute((t['amount'] > 250).all(), x) == False
    assert compute(t['amount'][0], x) == x['amount'][0]
    assert compute(t['amount'][-1], x) == x['amount'][-1]


def test_count_string():
    # count() skips missing values in an optional-string column.
    s = symbol('name', 'var * ?string')
    x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
    assert compute(s.count(), x) == 4


def test_reductions_on_recarray():
    assert compute(t.count(), x) == len(x)


def test_count_nan():
    t = symbol('t', '3 * ?real')
    x = np.array([1.0, np.nan, 2.0])
    assert compute(t.count(), x) == 2
# distinct() on columns, whole records, record arrays and string dtypes.
def test_distinct():
    x = np.array([('Alice', 100),
                  ('Alice', -200),
                  ('Bob', 100),
                  ('Bob', 100)],
                 dtype=[('name', 'S5'), ('amount', 'i8')])

    t = symbol('t', 'var * {name: string, amount: int64}')

    assert eq(compute(t['name'].distinct(), x),
              np.unique(x['name']))
    assert eq(compute(t.distinct(), x),
              np.unique(x))


def test_distinct_on_recarray():
    # distinct('a') keeps the first row for each distinct value of 'a'.
    rec = pd.DataFrame(
        [[0, 1],
         [0, 2],
         [1, 1],
         [1, 2]],
        columns=('a', 'b'),
    ).to_records(index=False)

    s = symbol('s', discover(rec))
    assert (
        compute(s.distinct('a'), rec) ==
        pd.DataFrame(
            [[0, 1],
             [1, 1]],
            columns=('a', 'b'),
        ).to_records(index=False)
    ).all()


def test_distinct_on_structured_array():
    arr = np.array(
        [(0., 1.),
         (0., 2.),
         (1., 1.),
         (1., 2.)],
        dtype=[('a', 'f4'), ('b', 'f4')],
    )
    s = symbol('s', discover(arr))
    assert(
        compute(s.distinct('a'), arr) ==
        np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
    ).all()


def test_distinct_on_str():
    rec = pd.DataFrame(
        [['a', 'a'],
         ['a', 'b'],
         ['b', 'a'],
         ['b', 'b']],
        columns=('a', 'b'),
    ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])

    s = symbol('s', discover(rec))
    assert (
        compute(s.distinct('a'), rec) ==
        pd.DataFrame(
            [['a', 'a'],
             ['b', 'a']],
            columns=('a', 'b'),
        ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
    ).all()
# Ordering, slicing, relabeling and grouping expressions.
def test_sort():
    assert eq(compute(t.sort('amount'), x),
              np.sort(x, order='amount'))

    assert eq(compute(t.sort('amount', ascending=False), x),
              np.sort(x, order='amount')[::-1])

    assert eq(compute(t.sort(['amount', 'id']), x),
              np.sort(x, order=['amount', 'id']))

    assert eq(compute(t.amount.sort(), x),
              np.sort(x['amount']))


def test_head():
    assert eq(compute(t.head(2), x),
              x[:2])


def test_tail():
    assert eq(compute(t.tail(2), x),
              x[-2:])


def test_label():
    expected = x['amount'] * 10
    expected = np.array(expected, dtype=[('foo', 'i8')])
    assert eq(compute((t['amount'] * 10).label('foo'), x),
              expected)


def test_relabel():
    expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
    result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)

    assert result.dtype.names == expected.dtype.names
    assert eq(result, expected)


def test_by():
    # Group by the sign of amount and count ids per group.
    expr = by(t.amount > 0, count=t.id.count())
    result = compute(expr, x)
    assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])


def test_compute_up_field():
    assert eq(compute(t['name'], x), x['name'])


def test_compute_up_projection():
    assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
# 3-d array fixture for slicing and axis-reduction tests.
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))

a = symbol('a', discover(ax))


def test_slice():
    # Exercise scalar, slice, step, fancy and mixed indexers.
    inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
            (0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
            (0, [1, 2])]
    for s in inds:
        assert (compute(a[s], ax) == ax[s]).all()
# Axis-wise reductions and summary records on the 3-d fixture.
def test_array_reductions():
    for axis in [None, 0, 1, (0, 1), (2, 1)]:
        assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
        assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))


def test_array_reductions_with_keepdims():
    for axis in [None, 0, 1, (0, 1), (2, 1)]:
        assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
                  ax.sum(axis=axis, keepdims=True))


def test_summary_on_ndarray():
    assert compute(summary(total=a.sum(), min=a.min()), ax) == \
        (ax.min(), ax.sum())

    result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
    expected = np.array([(ax.min(), ax.sum())],
                        dtype=[('min', 'float32'), ('total', 'float64')])
    assert result.ndim == ax.ndim
    assert eq(expected, result)


def test_summary_on_ndarray_with_axis():
    for axis in [0, 1, (1, 0)]:
        expr = summary(total=a.sum(), min=a.min(), axis=axis)
        result = compute(expr, ax)

        shape, dtype = to_numpy(expr.dshape)
        expected = np.empty(shape=shape, dtype=dtype)
        expected['total'] = ax.sum(axis=axis)
        expected['min'] = ax.min(axis=axis)

        assert eq(result, expected)
def test_utcfromtimestamp():
    t = symbol('t', '1 * int64')
    data = np.array([0, 1])
    expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
                        dtype='M8[us]')
    assert eq(compute(t.utcfromtimestamp, data), expected)


def test_nelements_structured_array():
    assert compute(t.nelements(), x) == len(x)
    assert compute(t.nelements(keepdims=True), x) == (len(x),)


def test_nelements_array():
    t = symbol('t', '5 * 4 * 3 * float64')
    x = np.random.randn(*t.shape)
    result = compute(t.nelements(axis=(0, 1)), x)
    np.testing.assert_array_equal(result, np.array([20, 20, 20]))

    result = compute(t.nelements(axis=1), x)
    np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))


def test_nrows():
    assert compute(t.nrows, x) == len(x)


# Fixture for the datetime-truncation tests below.
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
               dtype='M8[us]')
s = symbol('s', 'var * datetime')


def test_datetime_truncation():
    assert eq(compute(s.truncate(1, 'day'), dts),
              dts.astype('M8[D]'))
    assert eq(compute(s.truncate(2, 'seconds'), dts),
              np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
                       dtype='M8[s]'))
    assert eq(compute(s.truncate(2, 'weeks'), dts),
              np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))

    assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
# Truncation to hour/month boundaries and scalar/week-boundary agreement.
def test_hour():
    dts = [datetime(2000, 6, 20, 1, 00, 00),
           datetime(2000, 6, 20, 12, 59, 59),
           datetime(2000, 6, 20, 12, 00, 00),
           datetime(2000, 6, 20, 11, 59, 59)]
    dts = into(np.ndarray, dts)

    assert eq(compute(s.truncate(1, 'hour'), dts),
              into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
                                datetime(2000, 6, 20, 12, 0),
                                datetime(2000, 6, 20, 12, 0),
                                datetime(2000, 6, 20, 11, 0)]))


def test_month():
    dts = [datetime(2000, 7, 1),
           datetime(2000, 6, 30),
           datetime(2000, 6, 1),
           datetime(2000, 5, 31)]
    dts = into(np.ndarray, dts)

    assert eq(compute(s.truncate(1, 'month'), dts),
              into(np.ndarray, [date(2000, 7, 1),
                                date(2000, 6, 1),
                                date(2000, 6, 1),
                                date(2000, 5, 1)]))


def test_truncate_on_np_datetime64_scalar():
    s = symbol('s', 'datetime')
    data = np.datetime64('2000-01-02T12:30:00Z')
    assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')


def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
    s = symbol('s', 'datetime')
    n = np.datetime64('2014-11-11')
    p = datetime(2014, 11, 11)
    expr = s.truncate(1, 'week')
    assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
    a = symbol('a', '5 * 4 * int64')
    b = symbol('b', '5 * 4 * float32')
    x = np.arange(9, dtype='int64').reshape(3, 3)
    y = (x + 1).astype('float32')
    expr = sin(a) + 2 * b
    scope = {a: x, b: y}
    expected = sin(x) + 2 * y

    # check that we cast correctly
    assert expr.dshape == dshape('5 * 4 * float64')

    np.testing.assert_array_equal(compute(expr, scope), expected)
    np.testing.assert_array_equal(compute(expr, scope, optimize=False),
                                  expected)


# 2-d matrix / 1-d vector fixtures for the linear-algebra tests.
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))


def test_transpose():
    assert eq(compute(A.T, nA), nA.T)
    assert eq(compute(A.transpose((0, 1)), nA), nA)


def test_dot():
    assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
    assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))


def test_subexpr_datetime():
    data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
    s = symbol('s', discover(data))
    result = compute(s.truncate(days=2).day, data)
    expected = np.array([31, 2, 2, 4])
    np.testing.assert_array_equal(result, expected)
def test_mixed_types():
    # Ratio of two differently-typed record fields reduced over axis 0.
    x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
                  [(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
                  [(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
                  [(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
                  [(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
                 dtype=[('count', '<i4'), ('total', '<i8')])
    aggregate = symbol('aggregate', discover(x))
    result = compute(aggregate.total.sum(axis=(0,)) /
                     aggregate.count.sum(axis=(0,)), x)
    expected = (x['total'].sum(axis=0, keepdims=True) /
                x['count'].sum(axis=0, keepdims=True)).squeeze()
    np.testing.assert_array_equal(result, expected)


def test_broadcast_compute_against_numbers_and_arrays():
    A = symbol('A', '5 * float32')
    a = symbol('a', 'float32')
    b = symbol('b', 'float32')
    x = np.arange(5, dtype='f4')
    expr = Broadcast((A, b), (a, b), a + b)
    result = compute(expr, {A: x, b: 10})
    assert eq(result, x + 10)


def test_map():
    pytest.importorskip('numba')
    a = np.arange(10.0)

    f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
    x = symbol('x', discover(a))
    expr = x.map(f, 'float64')
    result = compute(expr, a)
    expected = f(a)

    # make sure we're not going to pandas here
    assert type(result) == np.ndarray
    assert type(result) == type(expected)

    np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
    """Join a record array against the module-level table on 'name'.

    NOTE(review): relies on module-level fixtures ``t`` and ``x`` defined
    elsewhere in this file -- confirm their schema includes a 'name' field
    compatible with the S7 dtype used here.
    """
    cities = np.array([('Alice', 'NYC'),
                       ('Alice', 'LA'),
                       ('Bob', 'Chicago')],
                      dtype=[('name', 'S7'), ('city', 'O')])
    c = symbol('cities', discover(cities))
    expr = join(t, c, 'name')
    result = compute(expr, {t: x, c: cities})
    assert (b'Alice', 1, 100, 'LA') in into(list, result)


def test_query_with_strings():
    """Equality selection against a bytes value on an S1 column."""
    b = np.array([('a', 1), ('b', 2), ('c', 3)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]


@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
    """isin on a string column matches numpy's in1d membership test."""
    b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    result = compute(s.x.isin(keys), b)
    expected = np.in1d(b['x'], keys)
    np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
    """nunique over whole records counts distinct (x, y) rows."""
    b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
                  ('a', 1), ('b', 2)],
                 dtype=[('x', 'S1'), ('y', 'i4')])
    s = symbol('s', discover(b))
    expr = s.nunique()
    # the last two rows duplicate the first two, so 8 rows -> 6 unique
    assert compute(expr, b) == len(np.unique(b))


def test_str_repeat():
    """String repeat maps onto np.char.multiply."""
    a = np.array(('a', 'b', 'c'))
    s = symbol('s', discover(a))
    expr = s.repeat(3)
    assert all(compute(expr, a) == np.char.multiply(a, 3))


def test_str_interp():
    """%-interpolation maps onto np.char.mod."""
    a = np.array(('%s', '%s', '%s'))
    s = symbol('s', discover(a))
    expr = s.interp(1)
    assert all(compute(expr, a) == np.char.mod(a, 1))


def test_timedelta_arith():
    """Adding/subtracting a timedelta64 shifts every datetime64 element."""
    dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
    delta = np.timedelta64(1, 'D')
    sym = symbol('s', discover(dates))
    assert (compute(sym + delta, dates) == dates + delta).all()
    assert (compute(sym - delta, dates) == dates - delta).all()


def test_coerce():
    """coerce('float64') converts an integer array to floats."""
    x = np.arange(1, 3)
    s = symbol('s', discover(x))
    np.testing.assert_array_equal(compute(s.coerce('float64'), x),
                                  np.arange(1.0, 3.0))
def test_concat_arr():
    """Concatenating two 1-D symbols yields the combined vector."""
    first = np.arange(15)
    second = np.arange(15, 30)
    s = symbol('s', discover(first))
    t = symbol('t', discover(second))
    combined = compute(concat(s, t), {s: first, t: second})
    assert (combined == np.arange(30)).all()


def test_concat_mat():
    """Concatenating two matrices works along both axis 0 and axis 1."""
    first = np.arange(15).reshape(5, 3)
    second = np.arange(15, 30).reshape(5, 3)
    s = symbol('s', discover(first))
    t = symbol('t', discover(second))
    # default axis=0: stack vertically
    stacked = compute(concat(s, t), {s: first, t: second})
    assert (stacked == np.arange(30).reshape(10, 3)).all()
    # axis=1: place side by side
    side_by_side = compute(concat(s, t, axis=1), {s: first, t: second})
    assert (side_by_side == np.concatenate((first, second), axis=1)).all()
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluation of the graph-lasso objective function.

    The objective is a shifted, scaled version of the normalized
    log-likelihood (i.e. its empirical mean over the samples) plus an
    l1 penalty on the off-diagonal precision entries to promote sparsity.
    """
    n_features = precision_.shape[0]
    log_lik_term = - 2. * log_likelihood(mle, precision_)
    shift = n_features * np.log(2 * np.pi)
    # penalize only the off-diagonal coefficients
    l1_offdiag = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return log_lik_term + shift + alpha * l1_offdiag
def _dual_gap(emp_cov, precision_, alpha):
    """Expression of the dual gap convergence criterion.

    The specific definition is given in Duchi "Projected Subgradient Methods
    for Learning Sparse Gaussians".
    """
    n_features = precision_.shape[0]
    # l1 mass of the off-diagonal precision entries only
    offdiag_l1 = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return np.sum(emp_cov * precision_) - n_features + alpha * offdiag_l1
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    off_diagonal = np.abs(emp_cov)
    # zero the main diagonal so only off-diagonal entries compete for the max
    np.fill_diagonal(off_diagonal, 0)
    return off_diagonal.max()
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                enet_tol=1e-4, max_iter=100, verbose=False,
                return_costs=False, eps=np.finfo(np.float64).eps,
                return_n_iter=False):
    """l1-penalized covariance estimator

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.

    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        The maximum number of iterations.

    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.

    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphLasso, GraphLassoCV

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.
    """
    _, n_features = emp_cov.shape
    # alpha == 0: no penalty, so the plain inverse empirical covariance is
    # the exact solution and no iterations are needed.
    if alpha == 0:
        if return_costs:
            precision_ = linalg.inv(emp_cov)
            cost = - 2. * log_likelihood(emp_cov, precision_)
            cost += n_features * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision_) - n_features
            if return_n_iter:
                return emp_cov, precision_, (cost, d_gap), 0
            else:
                return emp_cov, precision_, (cost, d_gap)
        else:
            if return_n_iter:
                return emp_cov, linalg.inv(emp_cov), 0
            else:
                return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    precision_ = pinvh(covariance_)

    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        for i in range(max_iter):
            # Block-coordinate descent: sweep over each variable, solving a
            # Lasso on the corresponding row/column of the covariance.
            for idx in range(n_features):
                sub_covariance = covariance_[indices != idx].T[indices != idx]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, enet_tol, check_random_state(None), False)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            method='lars', return_path=False)
                # Update the precision matrix, keeping it symmetric
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for/else: only reached when the loop never hit `break`,
            # i.e. the dual gap never dropped below tol
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        # enrich the message before propagating numerical failures
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e
    if return_costs:
        if return_n_iter:
            return covariance_, precision_, costs, i + 1
        else:
            return covariance_, precision_, costs
    else:
        if return_n_iter:
            return covariance_, precision_, i + 1
        else:
            return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alpha : positive float, default 0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.

    mode : {'cd', 'lars'}, default 'cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, default 1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, default 100
        The maximum number of iterations.

    verbose : boolean, default False
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.

    assume_centered : boolean, default False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """

    def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                 max_iter=100, verbose=False, assume_centered=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True

    def fit(self, X, y=None):
        """Fit the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : ignored, present for API consistency.
        """
        X = check_array(X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=self.verbose, return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    alphas : list of positive floats
        The list of regularization parameters, decreasing order.

    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        The maximum number of iterations.

    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    covariances_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices.

    precisions_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices.

    scores_ : List of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)

    for alpha in alphas:
        try:
            # Capture the errors, and move on.  The previous solution is
            # passed as cov_init, warm-starting each alpha on the path.
            covariance_, precision_ = graph_lasso(
                emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
                enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # failed alphas are recorded as NaN placeholders with -inf score
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, this_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alphas : integer, or list positive float, optional
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details.

    n_refinements : strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.

    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, defaults to
        a 3-fold strategy

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        Maximum number of iterations.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.

    n_jobs : int, optional
        number of jobs to run in parallel (default 1).

    verbose : boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.

    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : numpy.ndarray, shape (n_features, n_features)
        Estimated covariance matrix.

    precision_ : numpy.ndarray, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).

    alpha_ : float
        Penalization parameter selected.

    cv_alphas_ : list of float
        All penalization parameters explored.

    `grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
        Log-likelihood score on left-out data across folds.

    n_iter_ : int
        Number of iterations run for the optimal alpha.

    See Also
    --------
    graph_lasso, GraphLasso

    Notes
    -----
    The search for the optimal penalization parameter (alpha) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.

    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of alpha then come out as missing values, but the optimum may
    be close to these missing values.
    """

    def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
                 enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
                 verbose=False, assume_centered=False):
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.cv = cv
        self.n_jobs = n_jobs
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True

    def fit(self, X, y=None):
        """Fits the GraphLasso covariance model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
        """
        X = check_array(X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)

        cv = check_cv(self.cv, X, y, classifier=False)

        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)

        # NOTE(review): collections.Sequence was removed in Python 3.10
        # (use collections.abc.Sequence there) -- confirm the supported
        # Python versions before changing.
        if isinstance(n_alphas, collections.Sequence):
            # an explicit grid of alphas was supplied: no refinement
            alphas = self.alphas
            n_refinements = 1
        else:
            # build an initial log-spaced grid of n_alphas points
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]

        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter('ignore', ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # this did not allow to gain anything (same execution time with
                # or without).
                this_path = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=self.verbose
                )(
                    delayed(graph_lasso_path)(
                        X[train], alphas=alphas,
                        X_test=X[test], mode=self.mode,
                        tol=self.tol, enet_tol=self.enet_tol,
                        max_iter=int(.1 * self.max_iter),
                        verbose=inner_verbose)
                    for train, test in cv)

            # Little danse to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)

            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # absurdly large scores indicate a degenerate solution
                if this_score >= .1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index

            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]

            if not isinstance(n_alphas, collections.Sequence):
                # log-spaced sub-grid strictly between the new bounds
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                     n_alphas + 2)
                alphas = alphas[1:-1]

            if self.verbose and n_refinements > 1:
                print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                      % (i + 1, n_refinements, time.time() - t0))

        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
                                           cv=cv, n_jobs=self.n_jobs,
                                           verbose=inner_verbose))
        self.grid_scores = np.array(grid_scores)
        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha
        self.cv_alphas_ = alphas

        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=inner_verbose, return_n_iter=True)
        return self
| bsd-3-clause |
joshloyal/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
# Swiss roll with slight noise; labels are discarded since clustering is
# unsupervised here.
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5

###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
# No connectivity constraint: merges are driven by Euclidean distance alone.
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
# 3D scatter of the unstructured clustering, one color per cluster label.
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    # use the builtin float: np.float was a deprecated alias (removed in
    # NumPy 1.24) and the second plot below already uses float here
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph

# Sparse adjacency matrix restricting which samples may be merged.
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
# Restricting merges to the k-NN graph makes clusters follow the manifold.
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
                               linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)

###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 30 | 4777 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
    """Return a copy of dict *a* with ndarray values converted to lists.

    Non-array values are passed through unchanged, so the result compares
    equal with plain-Python expected dictionaries.
    """
    converted = {}
    for key, val in a.items():
        if isinstance(val, np.ndarray):
            converted[key] = val.tolist()
        else:
            converted[key] = val
    return converted
class _FeedingFunctionsTestCase(tf.test.TestCase):
  """Tests for feeding functions.

  Each feed function is expected to cycle through its data source
  endlessly, wrapping from the last row back to the first, and to key
  the batches by the given placeholder names.
  """

  def testArrayFeedFnBatchOne(self):
    # Batch size 1: each call yields the next single row, wrapping at 16.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {"index_placeholder": [i],
                  "value_placeholder": [[2 * i, 2 * i + 1]]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchFive(self):
    # Batch size 5: after 51 calls the cursor sits mid-epoch, so the next
    # batch straddles the wrap from row 15 back to row 0.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)

    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {"index_placeholder": [15, 0, 1, 2, 3],
                "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchOneHundred(self):
    # Batch larger than the data: one call spans six full epochs plus four rows.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)),
                "value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6
                + [[0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOne(self):
    # Silently skip when pandas is not installed.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {"index_placeholder": [i + 96],
                  "a_placeholder": [32 + i],
                  "b_placeholder": [64 + i]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchFive(self):
    # Silently skip when pandas is not installed.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)

    # cycle around a couple times; the next batch straddles the wrap
    for _ in range(0, 101, 2):
      aff()
    expected = {"index_placeholder": [127, 96, 97, 98, 99],
                "a_placeholder": [63, 32, 33, 34, 35],
                "b_placeholder": [95, 64, 65, 66, 67]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOneHundred(self):
    # Silently skip when pandas is not installed.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    # 100 rows = three full passes over 32 rows plus the first four rows again
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
tf.test.main()
| mit |
whitews/dpconverge | test_dp_3params.py | 1 | 1245 | from dpconverge.data_set import DataSet
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
# Four 3-D Gaussian blobs arranged on a grid serve as the ground truth.
n_features = 3
points_per_feature = 100
centers = [[2, 2, 1], [2, 4, 2], [4, 2, 3], [4, 4, 4]]

ds = DataSet(parameter_count=n_features)

# Seeded RandomState so the generated blobs are reproducible run to run.
rnd_state = np.random.RandomState()
rnd_state.seed(3)

for i, center in enumerate(centers):
    X, y = make_blobs(
        n_samples=points_per_feature,
        n_features=n_features,
        centers=center,
        cluster_std=0.2,
        random_state=rnd_state.randint(128)
    )
    ds.add_blob(i, X)

# Deliberately over-specified: 6 components requested for 4 true blobs,
# to check how many the model recommends as valid.
component_count = 6

ds.plot_blobs(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
ds.plot_blobs(ds.classifications, x=0, y=2, x_lim=[0, 6], y_lim=[0, 6])

ds.cluster(
    component_count=component_count,
    burn_in=100,
    iteration_count=400,
    random_seed=1
)

valid_components = ds.get_valid_components()

# NOTE(review): Python 2 print statement -- this script is not Python 3
# compatible as written.
print "Recommended component count: ", len(valid_components)

for i in range(component_count):
    if i in valid_components:
        ds.plot_iteration_traces(i)

# for i in range(component_count):
#     if i not in valid_components:
#         print "Possible invalid Component"
#         ds.plot_iteration_traces(i)

ds.plot_animated_trace(x_lim=[0, 6], y_lim=[0, 6])
| bsd-3-clause |
blbradley/subset-selector | subset_selector/selector.py | 1 | 3200 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_nbagg import NavigationIPy, FigureManagerNbAgg
# Toolbar buttons to keep from the stock notebook toolbar; pan/zoom tools are
# dropped so that axes clicks remain free for subplot selection.
BUTTONS = ('Home', 'Back', 'Forward', 'Download')
# Default axes background colour, used to restore a deselected subplot.
default_facecolor = matplotlib.rcParams['axes.facecolor']
def on_click(event):
    """Toggle selection of the clicked subplot on a left mouse click.

    The subplot's position within the figure identifies the sample; the
    toolbar's attached selector records the toggle and the axes background
    is recoloured to reflect the new state.
    """
    axes = event.inaxes
    if not axes or event.button != 1:
        return
    sample_index = axes.get_figure().get_axes().index(axes)
    toolbar = event.canvas.toolbar
    toolbar.selector.toggle_select(sample_index)
    toolbar.set_facecolor(axes.patch, sample_index)
    event.canvas.draw_idle()
class CustomNavigationToolbar(NavigationIPy):
    """Notebook toolbar trimmed down to navigation buttons only.

    Pan/zoom tools are removed (see ``BUTTONS``) so that axes clicks are free
    to toggle subplot selection, and Home/Back/Forward are re-purposed to page
    through the subsets of the attached ``selector``.
    """

    # Keep only the whitelisted buttons from the stock toolbar.
    toolitems = [item for item in NavigationIPy.toolitems if item[0] in BUTTONS]
    selector = None  # filled by SubsetSelector

    def home(self, *args):
        """Jump back to the first subset and redraw."""
        self.selector.home()
        self._update_views()

    def forward(self, *args):
        """Advance to the next subset; show a status message at the end."""
        try:
            self.selector.forward()
        except ValueError:
            self.set_message('No more subsets!')
        self._update_views()

    def back(self, *args):
        """Step to the previous subset; show a status message at the start."""
        try:
            self.selector.back()
        except ValueError:
            self.set_message('Already at home!')
        self._update_views()

    def set_facecolor(self, patch, sample_index):
        """Colour *patch* white when the sample is selected, default otherwise."""
        index = (self.selector.subset_index, sample_index)
        if self.selector.selected[index]:
            facecolor = 'white'
        else:
            facecolor = default_facecolor
        patch.set_facecolor(facecolor)

    def _update_views(self):
        """Rebuild the figure with one subplot per sample of the current subset."""
        self.canvas.figure.clear()
        # Two subplots per row. Floor division keeps nrows an int on Python 3
        # (true division would yield a float, which plt.subplots rejects);
        # on Python 2 ints, // is identical to the original /.
        nrows = (len(self.selector.ydata[self.selector.subset_index]) + 1) // 2
        figure, _ = plt.subplots(nrows, 2, num=self.canvas.figure.number)
        figure.set_size_inches(12, 2 * nrows, forward=True)
        for sample_index, ax in enumerate(figure.get_axes()):
            index = (self.selector.subset_index, sample_index)
            ax.plot(self.selector.xdata, self.selector.ydata[index])
            self.set_facecolor(ax.patch, sample_index)
FigureManagerNbAgg.ToolbarCls = CustomNavigationToolbar
class SubsetSelector(object):
    """Interactively pick samples out of subsets of curves.

    ``ydata`` is indexed as ``ydata[subset, sample, point]``; ``selected`` is a
    boolean mask of shape ``ydata.shape[:-1]`` tracking which (subset, sample)
    pairs the user has clicked.  Navigation moves ``subset_index`` between 0
    and ``len(ydata) - 1``, raising ``ValueError`` at either end.
    """

    def __init__(self, xdata, ydata):
        self.xdata = xdata
        self.ydata = ydata
        # One flag per (subset, sample) pair; all deselected initially.
        self.selected = np.zeros(ydata.shape[:-1], dtype=bool)
        self.subset_index = 0

    def home(self):
        """Return to the first subset."""
        self.subset_index = 0

    def forward(self):
        """Move to the next subset, raising ValueError past the last one."""
        if self.subset_index + 1 == len(self.ydata):
            raise ValueError
        self.subset_index += 1

    def back(self):
        """Move to the previous subset, raising ValueError before the first."""
        if self.subset_index == 0:
            raise ValueError
        self.subset_index -= 1

    def toggle_select(self, sample_index):
        """Flip the selection flag of *sample_index* in the current subset."""
        pos = (self.subset_index, sample_index)
        self.selected[pos] = not self.selected[pos]

    def plot(self):
        """Open the interactive figure and wire it to this selector."""
        figure = plt.figure()
        canvas = figure.canvas
        canvas.mpl_connect('button_press_event', on_click)
        canvas.toolbar.selector = self
        canvas.toolbar._update_views()

    def get_ydata(self):
        """Return the curves whose (subset, sample) flags are set."""
        return self.ydata[self.selected]
| mit |
Averroes/statsmodels | statsmodels/sandbox/survival2.py | 35 | 17924 | #Kaplan-Meier Estimator
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.iolib.table import SimpleTable
class KaplanMeier(object):
    """
    KaplanMeier(...)
        KaplanMeier(data, endog, exog=None, censoring=None)

    Create an object of class KaplanMeier for estimating
    Kaplan-Meier survival curves.

    Parameters
    ----------
    data: array_like
        An array, with observations in each row, and
        variables in the columns
    endog: index (starting at zero) of the column
        containing the endogenous variable (time)
    exog: index of the column containing the exogenous
        variable (must be categorical). If exog = None, this
        is equivalent to a single survival curve
    censoring: index of the column containing an indicator
        of whether an observation is an event, or a censored
        observation, with 0 for censored, and 1 for an event

    Attributes
    ----------
    censorings: List of censorings associated with each unique
        time, at each value of exog
    events: List of the number of events at each unique time
        for each value of exog
    results: List of arrays containing estimates of the
        value of the survival function and its standard error
        at each unique time, for each value of exog
    ts: List of unique times for each value of exog

    Methods
    -------
    fit: Calculate the Kaplan-Meier estimates of the survival
        function and its standard error at each time, for each
        value of exog
    plot: Plot the survival curves using matplotlib.pyplot
    summary: Display the results of fit in a table. Gives results
        for all (including censored) times
    test_diff: Test for difference between survival curves

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from statsmodels.sandbox.survival2 import KaplanMeier
    >>> dta = sm.datasets.strikes.load()
    >>> dta = dta.values()[-1]
    >>> dta[range(5),:]
    array([[  7.00000000e+00,   1.13800000e-02],
           [  9.00000000e+00,   1.13800000e-02],
           [  1.30000000e+01,   1.13800000e-02],
           [  1.40000000e+01,   1.13800000e-02],
           [  2.60000000e+01,   1.13800000e-02]])
    >>> km = KaplanMeier(dta,0)
    >>> km.fit()
    >>> km.plot()

    Doing

    >>> km.summary()

    will display a table of the estimated survival and standard errors
    for each time. The first few lines are

              Kaplan-Meier Curve
    =====================================
     Time     Survival        Std. Err
    -------------------------------------
     1.0   0.983870967742 0.0159984306572
     2.0   0.91935483871  0.0345807888235
     3.0   0.854838709677 0.0447374942184
     4.0   0.838709677419 0.0467104592871
     5.0   0.822580645161 0.0485169952543

    Doing

    >>> plt.show()

    will plot the survival curve

    Mutliple survival curves:

    >>> km2 = KaplanMeier(dta,0,exog=1)
    >>> km2.fit()

    km2 will estimate a survival curve for each value of industrial
    production, the column of dta with index one (1).

    With censoring:

    >>> censoring = np.ones_like(dta[:,0])
    >>> censoring[dta[:,0] > 80] = 0
    >>> dta = np.c_[dta,censoring]
    >>> dta[range(5),:]
    array([[  7.00000000e+00,   1.13800000e-02,   1.00000000e+00],
           [  9.00000000e+00,   1.13800000e-02,   1.00000000e+00],
           [  1.30000000e+01,   1.13800000e-02,   1.00000000e+00],
           [  1.40000000e+01,   1.13800000e-02,   1.00000000e+00],
           [  2.60000000e+01,   1.13800000e-02,   1.00000000e+00]])
    >>> km3 = KaplanMeier(dta,0,exog=1,censoring=2)
    >>> km3.fit()

    Test for difference of survival curves

    >>> log_rank = km3.test_diff([0.0645,-0.03957])

    The zeroth element of log_rank is the chi-square test statistic
    for the difference between the survival curves for exog = 0.0645
    and exog = -0.03957, the index one element is the degrees of freedom for
    the test, and the index two element is the p-value for the test

    Groups with nan names

    >>> groups = np.ones_like(dta[:,1])
    >>> groups = groups.astype('S4')
    >>> groups[dta[:,1] > 0] = 'high'
    >>> groups[dta[:,1] <= 0] = 'low'
    >>> dta = dta.astype('S4')
    >>> dta[:,1] = groups
    >>> dta[range(5),:]
    array([['7.0', 'high', '1.0'],
           ['9.0', 'high', '1.0'],
           ['13.0', 'high', '1.0'],
           ['14.0', 'high', '1.0'],
           ['26.0', 'high', '1.0']],
          dtype='|S4')
    >>> km4 = KaplanMeier(dta,0,exog=1,censoring=2)
    >>> km4.fit()
    """

    def __init__(self, data, endog, exog=None, censoring=None):
        # Record the caller's column indices first; they are remapped below to
        # positions within the trimmed `data[:, cols]` array (endog -> 0,
        # exog -> 1, censoring -> 1 or 2 depending on whether exog is given).
        self.exog = exog
        self.censoring = censoring
        cols = [endog]
        self.endog = 0
        if exog != None:
            cols.append(exog)
            self.exog = 1
        if censoring != None:
            cols.append(censoring)
            if exog != None:
                self.censoring = 2
            else:
                self.censoring = 1
        data = data[:,cols]
        if data.dtype == float or data.dtype == int:
            # Purely numeric input: drop any row containing a NaN.
            self.data = data[~np.isnan(data).any(1)]
        else:
            # Mixed/string dtype (e.g. string group labels): time and
            # censoring columns are coerced to float so NaN rows can be
            # filtered column by column, keeping exog as-is.
            t = (data[:,self.endog]).astype(float)
            if exog != None:
                evec = data[:,self.exog]
                evec = evec[~np.isnan(t)]
            if censoring != None:
                cvec = (data[:,self.censoring]).astype(float)
                cvec = cvec[~np.isnan(t)]
            t = t[~np.isnan(t)]
            if censoring != None:
                # Second pass: drop rows whose censoring indicator is NaN.
                t = t[~np.isnan(cvec)]
                if exog != None:
                    evec = evec[~np.isnan(cvec)]
                cvec = cvec[~np.isnan(cvec)]
            cols = [t]
            if exog != None:
                cols.append(evec)
            if censoring != None:
                cols.append(cvec)
            data = (np.array(cols)).transpose()
            self.data = data

    def fit(self):
        """
        Calculate the Kaplan-Meier estimator of the survival function
        """
        self.results = []
        self.ts = []
        self.censorings = []
        self.event = []
        if self.exog == None:
            # Single curve over the whole sample.
            self.fitting_proc(self.data)
        else:
            # One curve per unique exog value, appended in sorted group order.
            groups = np.unique(self.data[:,self.exog])
            self.groups = groups
            for g in groups:
                group = self.data[self.data[:,self.exog] == g]
                self.fitting_proc(group)

    def plot(self):
        """
        Plot the estimated survival curves. After using this method
        do

        plt.show()

        to display the plot
        """
        plt.figure()
        if self.exog == None:
            self.plotting_proc(0)
        else:
            for g in range(len(self.groups)):
                self.plotting_proc(g)
        plt.ylim(ymax=1.05)
        plt.ylabel('Survival')
        plt.xlabel('Time')

    def summary(self):
        """
        Print a set of tables containing the estimates of the survival
        function, and its standard errors
        """
        if self.exog == None:
            self.summary_proc(0)
        else:
            for g in range(len(self.groups)):
                self.summary_proc(g)

    def fitting_proc(self, group):
        """
        For internal use

        Estimate the survival curve and Greenwood standard errors for a
        single group and append the results to self.results / self.ts /
        self.event (and self.censorings when censoring is present).
        """
        # Times are truncated to ints so np.bincount can tabulate them.
        t = ((group[:,self.endog]).astype(float)).astype(int)
        if self.censoring == None:
            events = np.bincount(t)
            t = np.unique(t)
            # NOTE(review): indexing a 1-D bincount result with [:, list(t)]
            # relies on legacy NumPy indexing behavior — verify on modern
            # NumPy, where this raises IndexError for 1-D arrays.
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            eventsSum = np.r_[0,eventsSum]
            # Risk set: everyone not yet failed strictly before each time.
            n = len(group) - eventsSum[:-1]
        else:
            censoring = ((group[:,self.censoring]).astype(float)).astype(int)
            # 1 where censored, 0 where an event (flip of the indicator).
            reverseCensoring = -1*(censoring - 1)
            # Weighted bincounts: events and censorings tabulated per time.
            events = np.bincount(t,censoring)
            censored = np.bincount(t,reverseCensoring)
            t = np.unique(t)
            censored = censored[:,list(t)]
            censored = censored.astype(float)
            censoredSum = np.cumsum(censored)
            censoredSum = np.r_[0,censoredSum]
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            eventsSum = np.r_[0,eventsSum]
            # Risk set shrinks by both prior events and prior censorings.
            n = len(group) - eventsSum[:-1] - censoredSum[:-1]
            (self.censorings).append(censored)
        # Kaplan-Meier product-limit estimate.
        survival = np.cumprod(1-events/n)
        # Greenwood's formula for the variance of the estimate.
        var = ((survival*survival) *
               np.cumsum(events/(n*(n-events))))
        se = np.sqrt(var)
        (self.results).append(np.array([survival,se]))
        (self.ts).append(t)
        (self.event).append(events)

    def plotting_proc(self, g):
        """
        For internal use

        Draw the step-function survival curve for group index g, with tick
        marks at censored times.
        """
        survival = self.results[g][0]
        t = self.ts[g]
        e = (self.event)[g]
        if self.censoring != None:
            c = self.censorings[g]
            csurvival = survival[c != 0]
            ct = t[c != 0]
            if len(ct) != 0:
                # Short vertical ticks mark censored observations.
                plt.vlines(ct,csurvival+0.02,csurvival-0.02)
        # Duplicate each point to produce the characteristic step shape.
        x = np.repeat(t[e != 0], 2)
        y = np.repeat(survival[e != 0], 2)
        if self.ts[g][-1] in t[e != 0]:
            # Curve ends on an event: start at (0, 1) and drop the final
            # duplicated y so the steps line up.
            x = np.r_[0,x]
            y = np.r_[1,1,y[:-1]]
        else:
            # Curve ends on a censoring: extend the last level to the last
            # observed time.
            x = np.r_[0,x,self.ts[g][-1]]
            y = np.r_[1,1,y]
        plt.plot(x,y)

    def summary_proc(self, g):
        """
        For internal use

        Print a SimpleTable of time / survival / standard error for group g.
        """
        if self.exog != None:
            myTitle = ('exog = ' + str(self.groups[g]) + '\n')
        else:
            myTitle = "Kaplan-Meier Curve"
        table = np.transpose(self.results[g])
        table = np.c_[np.transpose(self.ts[g]),table]
        table = SimpleTable(table, headers=['Time','Survival','Std. Err'],
                            title = myTitle)
        print(table)

    def test_diff(self, groups, rho=None, weight=None):
        """
        test_diff(groups, rho=0)

        Test for difference between survival curves

        Parameters
        ----------
        groups: A list of the values for exog to test for difference.
            tests the null hypothesis that the survival curves for all
            values of exog in groups are equal
        rho: compute the test statistic with weight S(t)^rho, where
            S(t) is the pooled estimate for the Kaplan-Meier survival function.
            If rho = 0, this is the logrank test, if rho = 1, this is the
            Peto and Peto modification to the Gehan-Wilcoxon test.
        weight: User specified function that accepts as its sole arguement
            an array of times, and returns an array of weights for each time
            to be used in the test

        Returns
        -------
        An array whose zeroth element is the chi-square test statistic for
        the global null hypothesis, that all survival curves are equal,
        the index one element is degrees of freedom for the test, and the
        index two element is the p-value for the test.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from statsmodels.sandbox.survival2 import KaplanMeier
        >>> dta = sm.datasets.strikes.load()
        >>> dta = dta.values()[-1]
        >>> censoring = np.ones_like(dta[:,0])
        >>> censoring[dta[:,0] > 80] = 0
        >>> dta = np.c_[dta,censoring]
        >>> km = KaplanMeier(dta,0,exog=1,censoring=2)
        >>> km.fit()

        Test for difference of survival curves

        >>> log_rank = km3.test_diff([0.0645,-0.03957])

        The zeroth element of log_rank is the chi-square test statistic
        for the difference between the survival curves using the log rank test
        for exog = 0.0645 and exog = -0.03957, the index one element
        is the degrees of freedom for the test, and the index two element
        is the p-value for the test

        >>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)

        wilcoxon is the equivalent information as log_rank, but for the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        User specified weight functions

        >>> log_rank = km3.test_diff([0.0645,-0.03957], weight=np.ones_like)

        This is equivalent to the log rank test

        More than two groups

        >>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])

        The test can be performed with arbitrarily many groups, so long as
        they are all in the column exog
        """
        groups = np.asarray(groups)
        if self.exog == None:
            raise ValueError("Need an exogenous variable for logrank test")

        elif (np.in1d(groups,self.groups)).all():
            # Restrict to observations belonging to the requested groups.
            data = self.data[np.in1d(self.data[:,self.exog],groups)]
            t = ((data[:,self.endog]).astype(float)).astype(int)
            tind = np.unique(t)
            # Per-group accumulators: at-risk counts, totals, deaths, scores.
            NK = []
            N = []
            D = []
            Z = []
            if rho != None and weight != None:
                raise ValueError("Must use either rho or weights, not both")
            elif rho != None:
                # Weight by the pooled survival estimate raised to rho,
                # lagged by one time point (weight 1 at the first time).
                s = KaplanMeier(data,self.endog,censoring=self.censoring)
                s.fit()
                s = (s.results[0][0]) ** (rho)
                s = np.r_[1,s[:-1]]
            elif weight != None:
                s = weight(tind)
            else:
                # Unweighted: plain logrank test.
                s = np.ones_like(tind)
            if self.censoring == None:
                for g in groups:
                    # Deaths in group g and overall, tabulated per time.
                    dk = np.bincount((t[data[:,self.exog] == g]))
                    d = np.bincount(t)
                    if np.max(tind) != len(dk):
                        # Pad the group's bincount out to the full time range.
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                    # NOTE(review): [:, list(tind)] on 1-D arrays relies on
                    # legacy NumPy indexing — verify on modern NumPy.
                    dk = dk[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    # At-risk counts in group g and overall at each time.
                    nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
                    n = len(data) - dSum[:-1]
                    # Drop times where fewer than 2 subjects remain at risk.
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    # Expected deaths in group g under the null hypothesis.
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            else:
                for g in groups:
                    censoring = ((data[:,self.censoring]).astype(float)).astype(int)
                    reverseCensoring = -1*(censoring - 1)
                    censored = np.bincount(t,reverseCensoring)
                    # Censorings and deaths in group g, and deaths overall.
                    ck = np.bincount((t[data[:,self.exog] == g]),
                                     reverseCensoring[data[:,self.exog] == g])
                    dk = np.bincount((t[data[:,self.exog] == g]),
                                     censoring[data[:,self.exog] == g])
                    d = np.bincount(t,censoring)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                        ck = np.r_[ck,[0]*dif]
                    dk = dk[:,list(tind)]
                    ck = ck[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    ck = ck.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    ck = np.cumsum(ck)
                    ck = np.r_[0,ck]
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    censored = censored[:,list(tind)]
                    censored = censored.astype(float)
                    censoredSum = np.cumsum(censored)
                    censoredSum = np.r_[0,censoredSum]
                    # At-risk counts account for prior deaths and censorings.
                    nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
                          - ck[:-1])
                    n = len(data) - dSum[:-1] - censoredSum[:-1]
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            Z = np.array(Z)
            N = np.array(N)
            D = np.array(D)
            NK = np.array(NK)
            # Covariance matrix of the weighted score vector Z: off-diagonal
            # terms first, then the diagonal is overwritten with the variance
            # terms (which use 1 - nk/n rather than nk/n).
            sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
                                * np.array([(s ** 2)]*len(D))
                                ,np.transpose(NK/N))
            np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
                                                       * ((N - D)/(N - 1)) * D
                                                       * np.array([(s ** 2)]*len(D))
                                                       ,np.transpose(1 - (NK/N)))))
            # Quadratic form Z' sigma^{-1} Z ~ chi-square with |groups|-1 df;
            # pinv is used since sigma is singular (rows sum to zero).
            chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
            df = len(groups) - 1
            return np.array([chisq, df, stats.chi2.sf(chisq,df)])

        else:
            raise ValueError("groups must be in column exog")
| bsd-3-clause |
snnn/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 22 | 21121 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Mixture(distribution.Distribution):
  """Mixture distribution.

  The `Mixture` object implements batched mixture distributions.
  The mixture model is defined by a `Categorical` distribution (the mixture)
  and a python list of `Distribution` objects.

  Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
  `entropy_lower_bound`.


  #### Examples

  ```python
  # Create a mixture of two Gaussians:
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  mix = 0.3
  bimix_gauss = tfd.Mixture(
    cat=tfd.Categorical(probs=[mix, 1.-mix]),
    components=[
      tfd.Normal(loc=-1., scale=0.1),
      tfd.Normal(loc=+1., scale=0.5),
  ])

  # Plot the PDF.
  import matplotlib.pyplot as plt
  x = tf.linspace(-2., 3., int(1e4)).eval()
  plt.plot(x, bimix_gauss.prob(x).eval());
  ```

  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               cat,
               components,
               validate_args=False,
               allow_nan_stats=True,
               use_static_graph=False,
               name="Mixture"):
    """Initialize a Mixture distribution.

    A `Mixture` is defined by a `Categorical` (`cat`, representing the
    mixture probabilities) and a list of `Distribution` objects
    all having matching dtype, batch shape, event shape, and continuity
    properties (the components).

    The `num_classes` of `cat` must be possible to infer at graph construction
    time and match `len(components)`.

    Args:
      cat: A `Categorical` distribution instance, representing the probabilities
          of `distributions`.
      components: A list or tuple of `Distribution` instances.
        Each instance must have the same type, be defined on the same domain,
        and have matching `event_shape` and `batch_shape`.
      validate_args: Python `bool`, default `False`. If `True`, raise a runtime
        error if batch or event ranks are inconsistent between cat and any of
        the distributions. This is only checked if the ranks cannot be
        determined statically at graph construction time.
      allow_nan_stats: Boolean, default `True`. If `False`, raise an
       exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      use_static_graph: Calls to `sample` will not rely on dynamic tensor
        indexing, allowing for some static graph compilation optimizations, but
        at the expense of sampling all underlying distributions in the mixture.
        (Possibly useful when running on TPUs).
        Default value: `False` (i.e., use dynamic indexing).
      name: A name for this distribution (optional).

    Raises:
      TypeError: If cat is not a `Categorical`, or `components` is not
        a list or tuple, or the elements of `components` are not
        instances of `Distribution`, or do not have matching `dtype`.
      ValueError: If `components` is an empty list or tuple, or its
        elements do not have a statically known event rank.
        If `cat.num_classes` cannot be inferred at graph creation time,
        or the constant value of `cat.num_classes` is not equal to
        `len(components)`, or all `components` and `cat` do not have
        matching static batch shapes, or all components do not
        have matching static event shapes.
    """
    parameters = dict(locals())
    # Eager (pre-graph) validation of the constructor arguments.
    if not isinstance(cat, categorical.Categorical):
      raise TypeError("cat must be a Categorical distribution, but saw: %s" %
                      cat)
    if not components:
      raise ValueError("components must be a non-empty list or tuple")
    if not isinstance(components, (list, tuple)):
      raise TypeError("components must be a list or tuple, but saw: %s" %
                      components)
    if not all(isinstance(c, distribution.Distribution) for c in components):
      raise TypeError(
          "all entries in components must be Distribution instances"
          " but saw: %s" % components)

    dtype = components[0].dtype
    if not all(d.dtype == dtype for d in components):
      raise TypeError("All components must have the same dtype, but saw "
                      "dtypes: %s" % [(d.name, d.dtype) for d in components])
    # Merge static shapes across all components; merge_with raises if any
    # pair of components disagrees on a known dimension.
    static_event_shape = components[0].event_shape
    static_batch_shape = cat.batch_shape
    for d in components:
      static_event_shape = static_event_shape.merge_with(d.event_shape)
      static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
    if static_event_shape.ndims is None:
      raise ValueError(
          "Expected to know rank(event_shape) from components, but "
          "none of the components provide a static number of ndims")

    # Ensure that all batch and event ndims are consistent.
    with ops.name_scope(name, values=[cat.logits]) as name:
      num_components = cat.event_size
      static_num_components = tensor_util.constant_value(num_components)
      if static_num_components is None:
        raise ValueError(
            "Could not infer number of classes from cat and unable "
            "to compare this value to the number of components passed in.")
      # Possibly convert from numpy 0-D array.
      static_num_components = int(static_num_components)
      if static_num_components != len(components):
        raise ValueError("cat.num_classes != len(components): %d vs. %d" %
                         (static_num_components, len(components)))

      cat_batch_shape = cat.batch_shape_tensor()
      cat_batch_rank = array_ops.size(cat_batch_shape)
      if validate_args:
        # Runtime assertions (executed via control_dependencies in the
        # statistic/sampling methods) that every component's batch shape
        # matches cat's batch shape.
        batch_shapes = [d.batch_shape_tensor() for d in components]
        batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
        check_message = ("components[%d] batch shape must match cat "
                         "batch shape")
        self._assertions = [
            check_ops.assert_equal(
                cat_batch_rank, batch_ranks[di], message=check_message % di)
            for di in range(len(components))
        ]
        self._assertions += [
            check_ops.assert_equal(
                cat_batch_shape, batch_shapes[di], message=check_message % di)
            for di in range(len(components))
        ]
      else:
        self._assertions = []

      self._cat = cat
      self._components = list(components)
      self._num_components = static_num_components
      self._static_event_shape = static_event_shape
      self._static_batch_shape = static_batch_shape

      self._use_static_graph = use_static_graph
      if use_static_graph and static_num_components is None:
        raise ValueError("Number of categories must be known statically when "
                         "`static_sample=True`.")
    # We let the Mixture distribution access _graph_parents since its arguably
    # more like a baseclass.
    graph_parents = self._cat._graph_parents  # pylint: disable=protected-access
    for c in self._components:
      graph_parents += c._graph_parents  # pylint: disable=protected-access

    super(Mixture, self).__init__(
        dtype=dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=name)

  @property
  def cat(self):
    # The mixing `Categorical` distribution.
    return self._cat

  @property
  def components(self):
    # The list of component `Distribution`s.
    return self._components

  @property
  def num_components(self):
    # Statically-known number of mixture components (a Python int).
    return self._num_components

  def _batch_shape_tensor(self):
    return self._cat.batch_shape_tensor()

  def _batch_shape(self):
    return self._static_batch_shape

  def _event_shape_tensor(self):
    # All components share an event shape (checked in __init__), so the
    # first component's is representative.
    return self._components[0].event_shape_tensor()

  def _event_shape(self):
    return self._static_event_shape

  def _expand_to_event_rank(self, x):
    """Expand the rank of x up to static_event_rank times for broadcasting.

    The static event rank was checked to not be None at construction time.

    Args:
      x: A tensor to expand.
    Returns:
      The expanded tensor.
    """
    expanded_x = x
    for _ in range(self.event_shape.ndims):
      expanded_x = array_ops.expand_dims(expanded_x, -1)
    return expanded_x

  def _mean(self):
    # Mixture mean: sum_k p_k * mean_k, with cat probabilities broadcast
    # over the event dimensions.
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means)

  def _stddev(self):
    # Mixture standard deviation via the law of total variance, computed by
    # the shared helper on flattened (batch, component) matrices.
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      distribution_devs = [d.stddev() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)

      stacked_means = array_ops.stack(distribution_means, axis=-1)
      stacked_devs = array_ops.stack(distribution_devs, axis=-1)
      cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
      broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
                               array_ops.ones_like(stacked_means))

      batched_dev = distribution_utils.mixture_stddev(
          array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
          array_ops.reshape(stacked_means, [-1, len(self.components)]),
          array_ops.reshape(stacked_devs, [-1, len(self.components)]))

      # I.e. re-shape to list(batch_shape) + list(event_shape).
      return array_ops.reshape(batched_dev,
                               array_ops.shape(broadcasted_cat_probs)[:-1])

  def _log_prob(self, x):
    # log p(x) = logsumexp_k(log p_k + log p_k(x)).
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp

  def _log_cdf(self, x):
    # The mixture CDF is the probability-weighted sum of component CDFs;
    # computed in log space for numerical stability.
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_cdfs = [
          cat_lp + d_lcdf
          for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
      ]
      concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
      mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
      return mixture_log_cdf

  def _sample_n(self, n, seed=None):
    if self._use_static_graph:
      # This sampling approach is almost the same as the approach used by
      # `MixtureSameFamily`. The differences are due to having a list of
      # `Distribution` objects rather than a single object, and maintaining
      # random seed management that is consistent with the non-static code
      # path.
      samples = []
      cat_samples = self.cat.sample(n, seed=seed)
      for c in range(self.num_components):
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples.append(self.components[c].sample(n, seed=seed))
      x = array_ops.stack(
          samples, -self._static_event_shape.ndims - 1)  # [n, B, k, E]
      npdt = x.dtype.as_numpy_dtype
      mask = array_ops.one_hot(
          indices=cat_samples,  # [n, B]
          depth=self._num_components,  # == k
          on_value=np.ones([], dtype=npdt),
          off_value=np.zeros([], dtype=npdt))  # [n, B, k]
      mask = distribution_utils.pad_mixture_dimensions(
          mask, self, self._cat,
          self._static_event_shape.ndims)  # [n, B, k, [1]*e]
      return math_ops.reduce_sum(
          x * mask, axis=-1 - self._static_event_shape.ndims)  # [n, B, E]

    with ops.control_dependencies(self._assertions):
      n = ops.convert_to_tensor(n, name="n")
      static_n = tensor_util.constant_value(n)
      n = int(static_n) if static_n is not None else n
      cat_samples = self.cat.sample(n, seed=seed)

      static_samples_shape = cat_samples.get_shape()
      if static_samples_shape.is_fully_defined():
        samples_shape = static_samples_shape.as_list()
        samples_size = static_samples_shape.num_elements()
      else:
        samples_shape = array_ops.shape(cat_samples)
        samples_size = array_ops.size(cat_samples)
      static_batch_shape = self.batch_shape
      if static_batch_shape.is_fully_defined():
        batch_shape = static_batch_shape.as_list()
        batch_size = static_batch_shape.num_elements()
      else:
        batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
      static_event_shape = self.event_shape
      if static_event_shape.is_fully_defined():
        event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
      else:
        event_shape = self.event_shape_tensor()

      # Get indices into the raw cat sampling tensor. We will
      # need these to stitch sample values back out after sampling
      # within the component partitions.
      samples_raw_indices = array_ops.reshape(
          math_ops.range(0, samples_size), samples_shape)

      # Partition the raw indices so that we can use
      # dynamic_stitch later to reconstruct the samples from the
      # known partitions.
      partitioned_samples_indices = data_flow_ops.dynamic_partition(
          data=samples_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)

      # Copy the batch indices n times, as we will need to know
      # these to pull out the appropriate rows within the
      # component partitions.
      batch_raw_indices = array_ops.reshape(
          array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)

      # Explanation of the dynamic partitioning below:
      #   batch indices are i.e., [0, 1, 0, 1, 0, 1]
      # Suppose partitions are:
      #     [1 1 0 0 1 1]
      # After partitioning, batch indices are cut as:
      #     [batch_indices[x] for x in 2, 3]
      #     [batch_indices[x] for x in 0, 1, 4, 5]
      # i.e.
      #     [1 1] and [0 0 0 0]
      # Now we sample n=2 from part 0 and n=4 from part 1.
      # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
      # and for part 1 we want samples from batch entries 0, 0, 0, 0
      #   (samples 0, 1, 2, 3).
      partitioned_batch_indices = data_flow_ops.dynamic_partition(
          data=batch_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      samples_class = [None for _ in range(self.num_components)]

      for c in range(self.num_components):
        n_class = array_ops.size(partitioned_samples_indices[c])
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples_class_c = self.components[c].sample(n_class, seed=seed)

        # Pull out the correct batch entries from each index.
        # To do this, we may have to flatten the batch shape.

        # For sample s, batch element b of component c, we get the
        # partitioned batch indices from
        # partitioned_batch_indices[c]; and shift each element by
        # the sample index. The final lookup can be thought of as
        # a matrix gather along locations (s, b) in
        # samples_class_c where the n_class rows correspond to
        # samples within this component and the batch_size columns
        # correspond to batch elements within the component.
        #
        # Thus the lookup index is
        #   lookup[c, i] = batch_size * s[i] + b[c, i]
        # for i = 0 ... n_class[c] - 1.
        lookup_partitioned_batch_indices = (
            batch_size * math_ops.range(n_class) +
            partitioned_batch_indices[c])
        samples_class_c = array_ops.reshape(
            samples_class_c,
            array_ops.concat([[n_class * batch_size], event_shape], 0))
        samples_class_c = array_ops.gather(
            samples_class_c, lookup_partitioned_batch_indices,
            name="samples_class_c_gather")
        samples_class[c] = samples_class_c

      # Stitch back together the samples across the components.
      lhs_flat_ret = data_flow_ops.dynamic_stitch(
          indices=partitioned_samples_indices, data=samples_class)
      # Reshape back to proper sample, batch, and event shape.
      ret = array_ops.reshape(lhs_flat_ret,
                              array_ops.concat([samples_shape,
                                                self.event_shape_tensor()], 0))
      ret.set_shape(
          tensor_shape.TensorShape(static_samples_shape).concatenate(
              self.event_shape))
      return ret

  def entropy_lower_bound(self, name="entropy_lower_bound"):
    r"""A lower bound on the entropy of this mixture model.

    The bound below is not always very tight, and its usefulness depends
    on the mixture probabilities and the components in use.

    A lower bound is useful for ELBO when the `Mixture` is the variational
    distribution:

    \\(
    \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
    \\)

    where \\( p \\) is the prior distribution, \\( q \\) is the variational,
    and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
    \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
    place of \\( H[q] \\).

    For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
    \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
    simple lower bound is:

    \\(
    \begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
       & = \sum_i c_i H[q_i]
    \end{align}
    \\)

    This is the term we calculate below for \\( G[q] \\).

    Args:
      name: A name for this operation (optional).

    Returns:
      A lower bound on the Mixture's entropy.
    """
    with self._name_scope(name, values=[self.cat.logits]):
      with ops.control_dependencies(self._assertions):
        distribution_entropies = [d.entropy() for d in self.components]
        cat_probs = self._cat_probs(log_probs=False)
        partial_entropies = [
            c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
        ]
        # These are all the same shape by virtue of matching batch_shape
        return math_ops.add_n(partial_entropies)

  def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities."""
    # (Log-)softmax the cat logits, then split along the component axis so
    # callers receive one [batch]-shaped tensor per component.
    which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
    cat_probs = which_softmax(self.cat.logits)
    cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
| apache-2.0 |
zhoukekestar/drafts | 2020-10~12/2020-12-13-python/k-means.py | 1 | 2941 | import matplotlib.pyplot as plt
import numpy as np
import random
# Three synthetic Gaussian clusters of different sizes/spreads.
pointsA = np.random.normal(loc=[40, 60], scale=15, size=[200, 2])
plt.scatter(pointsA.T[0], pointsA.T[1], marker='o', label="A")
pointsB = np.random.normal(loc=[80, 100], scale=12, size=[100, 2])
plt.scatter(pointsB.T[0], pointsB.T[1], marker='^', label="B")
pointsC = np.random.normal(loc=[20, 110], scale=8, size=[30, 2])
plt.scatter(pointsC.T[0], pointsC.T[1], marker='s', label="C")
# Combine all points into a single list
points = list(pointsC) + list(pointsB) + list(pointsA)
KNUM = 3
# Determine the bounding box of all points.
# NOTE(review): 999 works as a "large" min sentinel only because the sampled
# coordinates are far below 999; float('inf') / -inf would be robust.
minX = 999
maxX = 0
minY = 999
maxY = 0
for p in points:
    minX = int(min(minX, p[0]))
    maxX = int(max(maxX, p[0]))
    minY = int(min(minY, p[1]))
    maxY = int(max(maxY, p[1]))
print(minX, maxX, minY, maxY)
# Initialise the K cluster centres uniformly at random inside the bounding box.
pointsK = []
for num in range(0, KNUM):
    pointsK.append([random.randrange(minX, maxX), random.randrange(minY, maxY)]);
pointsK = np.array(pointsK)
plt.scatter(pointsK.T[0], pointsK.T[1], marker='+', label="K")
# 两点间距离
# Squared Euclidean distance between two 2-D points (no sqrt: only used
# for nearest-centre comparisons, where the square preserves ordering).
def pointsLength(a, b):
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy
# 分配节点到 K 个中心队列中
# One k-means iteration: assign each point to its nearest centre, then
# return the K updated centres (cluster means).
def gotoK(points, pointsK):
    """Assign points to their nearest centre and recompute the centres.

    points  -- iterable of [x, y] points
    pointsK -- current list/array of K centre coordinates
    Returns a list of K new centres: the mean of each cluster, or the old
    centre unchanged when its cluster received no points.
    """
    # One bucket of assigned points per centre.
    pointsArr = [[] for _ in range(len(pointsK))]
    for p in points:
        # BUGFIX: the old sentinel was minLen = 999; squared distances easily
        # exceed 999, in which case every point silently fell into cluster 0.
        minLen = float('inf')
        minKIndex = 0
        for i, k in enumerate(pointsK):
            # Squared Euclidean distance, computed once (the old code
            # evaluated it twice per centre).
            dx = p[0] - k[0]
            dy = p[1] - k[1]
            d = dx * dx + dy * dy
            if d < minLen:
                minLen = d
                minKIndex = i
        pointsArr[minKIndex].append(p)
    newPointsK = []
    for k in range(len(pointsK)):
        cluster = pointsArr[k]
        if len(cluster) == 0:
            # Empty cluster: keep the previous centre unchanged.
            newPointsK.append([pointsK[k][0], pointsK[k][1]])
        else:
            sumx = sum(p[0] for p in cluster)
            sumy = sum(p[1] for p in cluster)
            newPointsK.append([sumx / len(cluster), sumy / len(cluster)])
    return newPointsK
# Show the initial scatter of the three clusters plus the random centres.
plt.show()
def drawABC():
    # Re-draw the three original clusters with their distinctive markers,
    # in the same order and style as the initial plot.
    for cluster, marker, label in ((pointsA, 'o', 'A'),
                                   (pointsB, '^', 'B'),
                                   (pointsC, 's', 'C')):
        plt.scatter(cluster.T[0], cluster.T[1], marker=marker, label=label)
# Iterate k-means for at most 15 rounds, plotting each intermediate state.
# NOTE(review): the inner loop reuses the loop variable `i`, clobbering the
# outer iteration counter (harmless here since `range` rebinds it, but a smell).
for i in range(15):
    print(i)
    drawABC()
    newPointsK = np.array(gotoK(points, pointsK))
    flag = 1
    # When every centre is unchanged, the algorithm has converged.
    for i, p in enumerate(pointsK):
        if (newPointsK[i][0] != p[0] or newPointsK[i][1] != p[1]):
            flag = 0
            break
    if (flag == 1):
        print('finished')
    else:
        pointsK = newPointsK
    plt.scatter(newPointsK.T[0], newPointsK.T[1], marker = '+', label = 'k')
    plt.show()
    if (flag == 1):
        break
# print(type(list(pointsC)))
| mit |
pombredanne/metamorphosys-desktop | metamorphosys/META/models/DynamicsTeam/RISoT/post_processing/common/post_processing_class.py | 18 | 28308 | # Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import json
import sys
import re
import numpy as np
from py_modelica.mat_file_functions.mat_file_to_dict import MatFile2Dict
import matplotlib.pyplot as plt
# Rescue if the limit-checking should get stuck in an infinite while-loop.
# Which should be impossible to start with, but if I am wrong...
MAX_ITERATIONS = 100000  # hard safety cap for check_max_limit/check_min_limit
class PostProcess:
    """Post-processing of Modelica simulation results (Python 2).

    Loads time-series from a .mat result file and offers query, statistics
    and limit-checking helpers over the named variables.
    """
    # NOTE(review): these are mutable class-level attributes shared by all
    # instances until __init__ rebinds them; `filter` also shadows the builtin.
    filter = []  # list of all variables/parameter to load from mat-file
                 # (does not need to include 'time' - loaded by default)
    time = None
    result = None

    def __init__(self, mat_file='', filter=None):
        """
        Loads in mat-file, extracts given variables in filter (time always included)
        and converts lists of values into numpy arrays.
        These are stored in result as:
        {{name1: array([values1])}, ..., {nameN: array([valuesN])}}
        """
        mat_converter = MatFile2Dict(mat_file, filter, False)
        result_lists = mat_converter.get_results()
        # convert lists into numpy arrays
        self.result = {}
        for item in result_lists.iteritems():
            self.result.update({item[0]: np.array(item[1])})
        self.time = self.result['time']

    def data_array(self, name):
        """
        Get time-series in numpy array format.
        name - name of variable
        e.g. data_array('time')
        returns with the time.
        """
        return self.result[name]

    def print_data(self, name):
        """
        Prints the time-series.
        name - name of variable
        e.g. data_array('time')
        returns with the time.
        """
        data = self.data_array(name)
        print 'name of data: '
        print name
        print 'here is the data: (with index)'
        print '[',
        # NOTE(review): relies on the loop variable `i` after the loop;
        # raises NameError/IndexError when data.size < 2.
        for i in xrange(data.size - 1):
            print str(i) + ':', str(data[i]) + ',',
        print str(i + 1) + ':', str(data[i + 1]) + ']'
        return data

    def save_as_svg(self, name, metric_value, metric_name='metric_name', formula='', unit=''):
        # Plot the variable against a horizontal line at metric_value,
        # save it under plots/<metric_name>.svg and register the file in
        # testbench_manifest.json's Artifacts list.
        metric_array = np.ones(len(self.time)) * metric_value
        plt.plot(self.time, self.data_array(name))
        plt.plot(self.time, metric_array)
        plt.plot()
        plt.title('{0}\n{1}'.format(metric_name, formula))
        plt.xlabel('time\n[s]')
        if unit:
            plt.ylabel('{0}\n[{1}]'.format(name, unit))
        else:
            plt.ylabel(name)
        if not os.path.isdir('plots'):
            os.mkdir('plots')
        plot_path = os.path.join('plots', '{0}.svg'.format(metric_name))
        plt.savefig(plot_path)
        plt.close()
        with open('testbench_manifest.json', 'r') as f_in:
            sum_rep_json = json.load(f_in)
        sum_rep_json['Artifacts'].append(plot_path.replace(os.path.sep, '/'))
        with open('testbench_manifest.json', 'wb') as f_out:
            json.dump(sum_rep_json, f_out, indent=4)
        return plot_path

    def time_array(self):
        """
        Get time-series of time in numpy array format.
        """
        return self.time

    def print_time(self):
        """
        Prints and returns with time-series of time.
        """
        time = self.time
        print 'here are time intervals:', time
        return time

    def short_array(self, name, start=0, end=-1):
        """
        Get a truncated, from n1 to n2 array for variable name
        name - name of variable
        start - start index of interval
        end - end index of interval (exclusive, slice semantics)
        N.B index goes from 0 to len(array)-1
        """
        return self.result[name][start:end]

    def plot(self, name):
        """
        Returns a tuple, suitable for plotting, of the variable's time-series together with time.
        name - name of variable
        """
        return self.data_array(name), self.time

    def get_data_by_time(self, name, time_val):
        """
        Get data based on time value.
        name - name of variable to consider
        time_val - time point where to extract the value
        Returns the data and the index of the data
        """
        i = 0
        time = self.time
        # NOTE(review): `i in xrange(...)` is an O(n) membership test used as
        # a bounds check; it works but is very slow on long series.
        while time[i] < time_val and i in xrange(time.size - 1):
            i += 1
        data_arr = self.data_array(name)
        if time[i - 1] != time_val:
            cur = data_arr[i - 1]
            next = data_arr[i]
            # NOTE(review): this interpolation weight looks wrong; standard
            # linear interpolation would use
            # (time_val - time[i-1]) / (time[i] - time[i-1]) -- verify.
            data = time[i - 1] / ((time[i - 1] + time[i]) / 2) * (next - cur) + cur
        else:
            data = data_arr[i - 1]
        return data, i

    def get_data_by_index(self, name, index):
        # Direct positional lookup into the variable's time-series.
        return self.data_array(name)[index]

    def get_index_from_time(self, time_val):
        """
        Get index based on time value.
        time_val - time point where to extract the value
        Returns index nearest to time_val
        """
        i = 0
        time = self.time
        while time[i] < time_val and i in xrange(time.size-1):
            i += 1
        return i

    def get_time(self, name, value, atol=1e-4, rtol=1e-4, start_index=0, end_index=-1):
        """
        Gets the first time point where the variable satisfies either atol or rtol,
        if no such point exists - returns with -1.
        name - name of variable
        atol - absolute tolerance
        rtol - relative tolerance
        """
        index = -1
        # N.B. this is only one of many ways to do this
        denominator = 1
        if value > rtol:
            denominator = value
        data = self.data_array(name)[start_index:end_index]
        cnt = 0
        for x in data:
            abs_diff = abs(x - value)
            rel_diff = abs_diff / denominator
            if abs_diff < atol or rel_diff < rtol:
                index = cnt
                break
            else:
                cnt += 1
        if index >= 0:
            return self.time[start_index + index]
        return -1

    def last_value(self, name):
        """
        Get last value of variable
        name - name of variable
        """
        return self.data_array(name)[-1]

    def global_max(self, name):
        """
        Get maximum value of variable
        name - name of variable
        """
        return self.data_array(name).max()

    def global_max_time(self, name):
        """
        Get time where max occurs
        name - name of variable
        returns the time at where the max is
        """
        index = self.data_array(name).argmax()
        time_at_max = self.time[index]
        return time_at_max

    def global_min(self, name):
        """
        Get minimum value of variable
        name - name of variable
        """
        return self.data_array(name).min()

    def global_min_time(self, name):
        """
        Get time where min occurs
        name - name of variable
        returns the time at where the min is
        """
        index = self.data_array(name).argmin()
        time_at_min = self.time[index]
        return time_at_min

    def global_abs_max(self, name):
        """
        Get the maximum absolute value of variable
        name - name of variable
        """
        return np.absolute(self.data_array(name)).max()

    def std_dev(self, name):
        """
        Returns the standard deviation of variable
        name - name of variable
        """
        stddev = self.data_array(name).std()
        return stddev

    def variance(self, name):
        """
        Returns the variance of variable
        name - name of variable
        """
        variance = self.data_array(name).var()
        return variance

    def sum_value(self, name):
        """
        Returns the sum of the time-series for the variable
        name - name of variable
        """
        result = self.data_array(name).sum()
        return result

    def mean(self, name):
        """
        Returns the mean of the time-series for the variable
        name - name of variable
        """
        result = np.mean(self.data_array(name), dtype=np.float64)
        return result

    def integrate(self, name):
        """
        Returns the area under the curve of the time-series for the variable
        (trapezoidal rule; the first interval is degenerate and contributes 0)
        name - name of variable
        """
        time = self.time
        data = self.data_array(name)
        sum = 0
        next = data[0]
        next_t = time[0]
        for i in xrange(data.size):
            cur = next
            next = data[i]
            cur_t = next_t
            next_t = time[i]
            height = (next + cur) / 2
            interval = next_t - cur_t
            sum += height * interval
        return sum

    def minima(self, name):
        """
        Returns the minima of time-series of variable
        name - name of variable
        """
        data = self.data_array(name)
        # NOTE(review): `min` shadows the builtin, and `data[++i]` is NOT an
        # increment in Python (unary plus twice == data[i]); the window lags
        # one sample behind -- data[i + 1] was probably intended. Confirm
        # against expected results before relying on this method.
        min = []
        prev = 0
        cur = 0
        next = data[0]
        for i in xrange(data.size):
            if cur < prev and cur <= next:
                min.append(cur)
            prev = cur
            cur = next
            next = data[++i]
        minimum = np.array(min)
        return minimum

    def maxima(self, name):
        """
        Returns the maxima of time-series of variable
        name - name of variable
        """
        data = self.data_array(name)
        # NOTE(review): same `data[++i]` no-op issue as in minima().
        max = []
        prev = 0
        cur = 0
        next = data[0]
        for i in xrange(data.size):
            if cur >= prev and cur > next:
                max.append(cur)
            prev = cur
            cur = next
            next = data[++i]
        maximum = np.array(max)
        return maximum

    def pos_neg(self, name, tol=0.00000015):
        """
        Returns time of the roots from positive to negative of time-series of variable
        name - name of variable
        tol - tolerance
        """
        data = self.data_array(name)
        time_arr = self.time
        time = []
        next = -1
        for i in xrange(data.size):
            cur = next
            next = data[i]
            if cur > 0 + tol and next <= 0 + tol:
                if cur != 0:
                    cur_t = time_arr[i - 1]
                    next_t = time_arr[i]
                    # NOTE(review): parenthesization differs from neg_pos()
                    # ((cur / (cur+next) / 2) here vs cur / ((cur+next) / 2)
                    # there) -- one of the two interpolations must be wrong.
                    time.append((cur / (cur + next) / 2) * (next_t - cur_t) + cur_t)
                else:
                    time.append(time_arr[i - 1])
        timing = np.array(time)
        return timing

    def neg_pos(self, name, tol=0.00000015):
        """
        Returns time of the roots from negative to positive of time-series of variable
        name - name of variable
        tol - tolerance
        """
        time = []
        data = self.data_array(name)
        time_arr = self.time
        next = 1
        for i in xrange(data.size):
            cur = next
            next = data[i]
            if cur <= 0 + tol and next > 0 + tol:
                if cur != 0:
                    cur_t = time_arr[i - 1]
                    next_t = time_arr[i]
                    time.append(cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t)
                else:
                    time.append(time_arr[i - 1])
        timing = np.array(time)
        return timing

    def to_zero(self, name, value_index):
        """
        # time from a number to zero
        # (use index from print_data() function)
        # parameters: data array, time array, index of value
        # returns the time of the zero
        """
        data = self.data_array(name)
        time_arr = self.time
        i = value_index + 1
        cur = data[value_index]
        next = data[i]
        tolerance = 0.00000015
        # Scan forward from value_index until the sign (within tolerance)
        # flips; -1 means the series never crosses zero afterwards.
        if data[value_index] >= 0:
            while next >= 0 + tolerance and i in xrange(data.size - 1):
                i += 1
                cur = next
                next = data[i]
            if next >= 0 + tolerance:
                return -1
        else:
            while next <= 0 + tolerance and i in xrange(data.size - 1):
                i += 1
                cur = next
                next = data[i]
            if next <= 0 + tolerance:
                return -1
        if cur != 0:
            cur_t = time_arr[i - 1]
            next_t = time_arr[i]
            time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
        else:
            time = time_arr[i - 1]
        return time

    def from_zero(self, name, value_index):
        """
        # time from a number to zero
        # (use index from print_data() function)
        # parameters: data array, time array, index of value
        # returns the time of the zero
        """
        data = self.data_array(name)
        time_arr = self.time
        i = value_index - 1
        cur = data[value_index]
        next = data[i]
        tolerance = 0.00000015
        # Same as to_zero() but scanning backwards from value_index.
        if data[value_index - 1] >= 0:
            while next >= 0 + tolerance and i in xrange(data.size):
                i -= 1
                cur = next
                next = data[i]
            if next >= 0 + tolerance:
                return -1
        else:
            while next <= 0 + tolerance and i in xrange(data.size):
                i -= 1
                cur = next
                next = data[i]
            if next <= 0 + tolerance:
                return -1
        if cur != 0:
            cur_t = time_arr[i + 1]
            next_t = time_arr[i]
            time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
        else:
            time = time_arr[i + 1]
        return time

    def zeros(self, name):
        """
        Find zeros of time-series for variable
        name - name of variable
        returns the time of the zero
        """
        data_array = self.data_array(name)
        time = self.time
        data = [[], []]
        # NOTE(review): broken -- pos_neg()/neg_pos() take a variable NAME
        # (plus a tolerance), but arrays are passed here; data_array(ndarray)
        # will fail. Probably should be self.pos_neg(name)/self.neg_pos(name).
        data[0].append(self.pos_neg(data_array, time))
        data[1].append(self.neg_pos(data_array, time))
        data_arr = np.array(data)
        return data_arr

    def compare(self, name1, name2):
        """
        Compare the time-series of two variables
        name1 - name of variable 1
        name2 - name of variable 2
        returns true if the results are identical
        (assumes both series have the same length)
        """
        data1 = self.data_array(name1)
        data2 = self.data_array(name2)
        for i in xrange(data1.size):
            if data1[i] != data2[i]:
                return False
        return True

    def time_total(self, val1, val2):
        # finding the difference between 2 times
        time = abs(val2 - val1)
        return time

    def delta_t(self, start_index, end_index):
        """
        Returns the length of the time-interval between to indices
        (an end_index of -1 measures up to the final time point)
        """
        t1 = self.time[start_index]
        t2 = self.time[end_index]
        dt = t2 - t1
        return dt

    def get_local_max(self, name, start_index, end_index):
        """
        Returns the value of the maximum between two indices
        N.B. including both points
        :param name:
        :param start_index:
        :param end_index:
        """
        if end_index == -1:
            maximum = self.data_array(name)[start_index:].max()
        else:
            maximum = self.data_array(name)[start_index:end_index + 1].max()
        return maximum

    def get_local_min(self, name, start_index, end_index):
        """
        Returns the value of the minimum between two indices
        N.B. including both points
        """
        if end_index == -1:
            minimum = self.data_array(name)[start_index:].min()
        else:
            minimum = self.data_array(name)[start_index:end_index + 1].min()
        return minimum

    def find_first_max_violation(self, name, value, start_index=0):
        """
        Starting from start_index it looks for the first index where the
        time-series has a value greater than value.
        If it never occurs, it returns -1
        """
        time_series = self.data_array(name)[start_index:]
        n = len(time_series)
        for i in range(n):
            if time_series[i] > value:
                return i + start_index
        return -1

    def find_first_min_violation(self, name, value, start_index=0):
        """
        Starting from start_index it looks for the first index where the
        time-series has a value less than value.
        If it never occurs, it returns -1
        """
        time_series = self.data_array(name)[start_index:]
        n = len(time_series)
        for i in range(n):
            if time_series[i] < value:
                return i + start_index
        return -1

    def check_max_limit(self, name, value):
        """
        Scan the whole series for excursions above `value` that last longer
        than 0.5 s; returns (limit_exceeded, worst_value_or_'').
        Shorter excursions are deliberately ignored.
        """
        actual_value = ''
        limit_exceeded = False
        start_index = 0
        global_max = -np.Inf
        cnt = 0
        print 'check_max_limit'
        while start_index > -1:
            index = self.find_first_max_violation(name, value, start_index)
            if index > -1:
                # end_index == -1 means the violation lasts to the end of
                # the series; delta_t then measures up to the last sample.
                end_index = self.find_first_min_violation(name, value, index)
                d_t = self.delta_t(index, end_index)
                print 'Found violation at t={0} lasting : {1}'.format(self.time[index], d_t)
                if d_t > 0.5:
                    limit_exceeded = True
                    local_max = self.get_local_max(name, index, end_index)
                    print 'Local maximum : {0}'.format(local_max)
                    if local_max > global_max:
                        global_max = local_max
                start_index = end_index
            else:
                break
            cnt += 1
            if cnt == MAX_ITERATIONS:
                print 'Limit checking for variable {0} aborted after {1} iterations' \
                    .format(name, MAX_ITERATIONS)
                sys.exit(1)
        if limit_exceeded:
            actual_value = global_max
        return limit_exceeded, actual_value

    def check_min_limit(self, name, value):
        """
        Mirror image of check_max_limit: excursions below `value` lasting
        longer than 0.5 s; returns (limit_exceeded, worst_value_or_'').
        """
        actual_value = ''
        limit_exceeded = False
        start_index = 0
        global_min = np.Inf
        cnt = 0
        print 'check_min_limit'
        while start_index > -1:
            index = self.find_first_min_violation(name, value, start_index)
            if index > -1:
                end_index = self.find_first_max_violation(name, value, index)
                d_t = self.delta_t(index, end_index)
                print 'Found violation at t={0} lasting : {1} s'.format(self.time[index], d_t)
                if d_t > 0.5:
                    limit_exceeded = True
                    local_min = self.get_local_min(name, index, end_index)
                    print 'Local minimum : {0}'.format(local_min)
                    if local_min < global_min:
                        global_min = local_min
                start_index = end_index
            else:
                break
            cnt += 1
            if cnt == MAX_ITERATIONS:
                print 'Limit checking for variable {0} aborted after {1} iterations' \
                    .format(name, MAX_ITERATIONS)
                sys.exit(1)
        if limit_exceeded:
            actual_value = global_min
        return limit_exceeded, actual_value
def update_metrics_in_report_json(metrics, report_file='testbench_manifest.json'):
"""
Metrics should be of the form
:param metrics:
:param report_file:
{'name_of_metric' : {value: (int) or (float), unit: ""}, ...}
"""
if not os.path.exists(report_file):
raise IOError('Report file does not exits : {0}'.format(report_file))
# read current summary report, which contains the metrics
with open(report_file, 'r') as file_in:
result_json = json.load(file_in)
assert isinstance(result_json, dict)
if 'Metrics' in result_json:
for metric in result_json['Metrics']:
if 'Name' in metric and 'Value' in metric:
if metric['Name'] in metrics.keys():
new_value = metrics[metric['Name']]['value']
new_unit = metrics[metric['Name']]['unit']
if new_unit is not None:
metric['Unit'] = new_unit
if new_value is not None:
metric['Value'] = str(new_value)
else:
pass
else:
print 'Metric item : {0} does not have right format'.format(metric)
pass
# update json file with the new values
with open(report_file, 'wb') as file_out:
json.dump(result_json, file_out, indent=4)
else:
print 'Report file {0} does not have any Metrics defined..'
pass
def read_limits():
    """
    Reads in limits and modifies the ModelicaUri to the correct one.
    Returns:
    - the updated limit_dict
    - the filter as a list
    """
    with open('limits.json', 'r') as f_in:
        limit_dict = json.load(f_in)
    # use set to avoid checking for duplicates
    filter = set()
    for limit_item in limit_dict['LimitChecks']:
        # drop first part of VariableFullPath update the limit_item
        # once the limit.json is generated correctly these two lines can be dropped
        # modelica_uri = '.'.join(.split('.')[1:])
        # modelica_model_rel_uri = limit_item['VariableName']
        # split_full_path = limit_item['LimitFullPath'].split('/')
        # modelica_model = split_full_path[-2]
        # cyphy_relative_uri = '{0}.{1}'.format(modelica_model, modelica_model_rel_uri)
        # modelica_uri = modelica_uri.replace(modelica_model_rel_uri, cyphy_relative_uri)
        # limit_item['VariableFullPath'] = modelica_uri
        # limit_item['ComponentInstanceName'] = split_full_path[-3]
        # filter out this variable in the .mat-file
        filter.add(limit_item['VariableFullPath'])
        # Code specific for FANG-I, with no defined VariableName from GME
        # limit_var_name = limit_item['VariableName']
        # limit_var_name = re.sub('\.u(.*)$', '', limit_item['VariableFullPath'])
        # limit_var_name_split = limit_var_name.split('.')
        # limit_var_name = limit_var_name_split[len(limit_var_name_split)-3] + '=>' + \
        #                  limit_var_name_split[len(limit_var_name_split)-1]
        # limit_item['LimitName'] = limit_var_name
    # NOTE(review): currently only the VariableFullPath values are collected;
    # the historic rewriting logic above is kept commented out on purpose.
    filter = list(filter)
    print "Variables for limit-checking : {0}".format(filter)
    return limit_dict, filter
def check_limits_and_add_to_report_json(pp, limit_dict):
    """
    Check the limits and write out dictionary to testbench_manifest.json

    pp         - a PostProcess instance holding the loaded time-series
    limit_dict - dict with a 'LimitChecks' list; each item is updated in
                 place with 'LimitExceeded' and 'ActualValue'
    Limit types: 'min', 'max', or anything else = symmetric +/- band check.
    """
    assert isinstance(pp, PostProcess)
    for limit_item in limit_dict['LimitChecks']:
        modelica_uri = limit_item['VariableFullPath']
        limit_value = limit_item['Value']
        limit_type = limit_item['Type']
        print "--== {0} ==--".format(modelica_uri)
        print "Type of Limit : {0}".format(limit_type)
        print "Limit : {0} ".format(limit_value)
        if limit_type == 'min':
            limit_exceeded, actual_value = pp.check_min_limit(modelica_uri, limit_value)
            limit_item['LimitExceeded'] = limit_exceeded
            limit_item['ActualValue'] = str(actual_value)
        elif limit_type == 'max':
            limit_exceeded, actual_value = pp.check_max_limit(modelica_uri, limit_value)
            limit_item['LimitExceeded'] = limit_exceeded
            limit_item['ActualValue'] = str(actual_value)
        else:
            # Symmetric band: violated above +limit_value or below -limit_value.
            limit_exceeded_max, actual_max_value = pp.check_max_limit(modelica_uri, limit_value)
            limit_exceeded_min, actual_min_value = pp.check_min_limit(modelica_uri, -limit_value)
            # determine the actual value depending on which limits were exceeded
            if limit_exceeded_max and limit_exceeded_min:
                if actual_max_value > abs(actual_min_value):
                    actual_value = str(actual_max_value)
                else:
                    actual_value = str(abs(actual_min_value))
            elif limit_exceeded_max:
                actual_value = str(actual_max_value)
            elif limit_exceeded_min:
                actual_value = str(abs(actual_min_value))
            else:
                actual_value = ''
            limit_item['LimitExceeded'] = limit_exceeded_max or limit_exceeded_min
            limit_item['ActualValue'] = actual_value
            limit_item['Value'] = str(limit_value)
        print "Violation : {0}".format(limit_item["LimitExceeded"])
    # Persist the (mutated) limit checks back into the manifest.
    with open('testbench_manifest.json', 'r') as f_in:
        sum_rep_json = json.load(f_in)
    sum_rep_json['LimitChecks'] = limit_dict['LimitChecks']
    with open('testbench_manifest.json', 'wb') as f_out:
        json.dump(sum_rep_json, f_out, indent=4)
    print "Limits updated"
| mit |
with-git/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  # Pandas is not installed at all; the pandas-based tests become no-ops.
  HAS_PANDAS = False
def get_rows(array, row_indices):
  """Stack the selected rows of `array` (in order) into one 2-D array."""
  return np.vstack([array[idx] for idx in row_indices])
class FeedingQueueRunnerTestCase(test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    # Single-threaded feeding: dequeued batches must follow the array order,
    # wrapping around at the end (epoch behaviour).
    with ops.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = ff._enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          # dq[0] carries the row indices, dq[1] the row values.
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    # With shuffling/multiple threads the order is arbitrary, so only check
    # that the dequeued values match the rows named by the dequeued indices.
    with ops.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    # Skipped silently when pandas is unavailable (see HAS_PANDAS guard).
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = ff._enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array1.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          # dq[0] holds the DataFrame index labels, dq[1:] one tensor per column.
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeedingMultiThread(self):
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
luminescence/PolyLibScan | helpers/msms.py | 1 | 13918 | import pathlib2 as pl
import os
import subprocess as sp
import tempfile
import numpy as np
import pandas as pd
import collections as col
import Bio.PDB as PDB
import PolyLibScan.Database.db as DB
class HydrophobicParameterisation(object):
    def __init__(self, pdb_path,
                 pdb_to_xyzrn=None, atmtypenumbers=None, msms_bin=None, verbose=False):
        # Directory of this module; used as anchor for the bundled msms tools.
        self.own_path = pl.Path(__file__).parent
        self.path = self.set_paths(pdb_to_xyzrn, atmtypenumbers, msms_bin)
        # Property setter below validates that the file exists.
        self.pdb_path = pdb_path
        # NOTE(review): the `verbose` argument is accepted but never stored/used.
        self.residues = {}
        self.phob_resis = {}  # presumably hydrophobic residues; filled elsewhere - confirm
        self.phil_resis = {}  # presumably hydrophilic residues; filled elsewhere - confirm
        self.correlation_parameter = 0.02
        self.parser = PDB.PDBParser()
        self.struc = self.parser.get_structure('protein', self.pdb_path.as_posix())
    @property
    def pdb_path(self):
        # Backed by _pdb_path, which the setter guarantees to be a Path object.
        return self._pdb_path
@pdb_path.setter
def pdb_path(self, value):
'''Setter makes sure that the path is a PosixPath object
'''
if isinstance(value, basestring):
self._pdb_path = pl2.Path(value)
else:
self._pdb_path = value
self.check_pdb_path()
    def check_pdb_path(self):
        # Fail fast with IOError when the configured pdb file is missing.
        if not self.pdb_path.exists():
            raise IOError('pdb file could not be found.')
def set_paths(self, pdb_to_xyzrn, atmtypenumbers, msms_bin):
paths = {}
if atmtypenumbers and pl.Path(atmtypenumbers).exists():
paths['cmd_dir'] = pl.Path(atmtypenumbers).parent.absolute().resolve()
else:
paths['cmd_dir'] = self.own_path.joinpath('../external_bins/msms').absolute().resolve()
if pdb_to_xyzrn and pl.Path(pdb_to_xyzrn).exists():
paths['pdb_to_xyzrn'] = pl.Path(pdb_to_xyzrn)
else:
paths['pdb_to_xyzrn'] = self.own_path.joinpath('../external_bins/msms/pdb_to_xyzrn').absolute().resolve()
if msms_bin and pl.Path(msms_bin).exists():
paths['msms_bin'] = pl.Path(msms_bin)
else:
paths['msms_bin'] = self.own_path.joinpath('../external_bins/msms/msms.x86_64Linux2.2.6.1.staticgcc').absolute().resolve()
return paths
def msms_surface(self, pdb_file=None, verbose=False):
if pdb_file:
path = pdb_file
else:
path = self.pdb_path
input_file = pl.Path(path).absolute().resolve().as_posix()
msms_folder = tempfile.mkdtemp()
xyz_file = tempfile.mktemp(dir=msms_folder)
with open(xyz_file, 'w') as f:
xyz_run = sp.Popen('%s %s' % (self.path['pdb_to_xyzrn'].as_posix(), input_file),
cwd=self.path['cmd_dir'].as_posix(),
shell=True, stdin=None, stdout=f, stderr=sp.PIPE)
xyz_out = xyz_run.communicate()
if xyz_out[1] != '':
raise RuntimeError('pdb_to_xyz:\n %s' % xyz_out[1])
file_path = os.path.join(msms_folder,'msms')
msms = sp.Popen('%s -probe_radius 1.5 -if %s -of %s -af %s' % (self.path['msms_bin'],
xyz_file, file_path, file_path),
shell=True, stdin=None, stdout=sp.PIPE, stderr=sp.PIPE)
msms_out = msms.communicate()
if verbose:
print out[0]
if msms_out[1] != '':
raise RuntimeError('msms:\n %s' % xyz_out[1])
vertices_file = pl.Path(file_path + '.vert')
if not vertices_file.exists():
raise IOError('%s does not exist.' % vertices_file)
face_file = pl.Path(file_path + '.face')
if not face_file.exists():
raise IOError('%s does not exist.' % face_file)
area_file = pl.Path(file_path + '.area')
if not area_file.exists():
raise IOError('%s does not exist.' % area_file)
self.path['msms_vertex'] = vertices_file
self.path['msms_face'] = face_file
self.path['msms_area'] = area_file
return vertices_file, face_file, area_file
def area(self, path=None):
'''Parse msms-area file and return a numpy array.
'''
if path:
area_file = pl.Path(path)
else:
area_file = self.path['msms_area']
with open(area_file.as_posix()) as f:
content = f.read().split('\n')[1:-1]
atom_count = len(content)
data = np.zeros(atom_count, dtype=[('count', np.int), ('atom_id', 'S4'), ('resn', 'S3'),
('chain', 'S1'), ('resi', np.int), ('iCode', 'S1'),
('ses', np.float), ('sas', np.float)])
for i, line in enumerate(content):
cnt, ses, sas, id_string = line.split()
a_id, resn, chain, resi = id_string.split('_')
try:
res_id = int(resi)
icode = ' '
except ValueError:
res_id = int(resi[:-1])
icode = resi[-1]
data[i] = (int(cnt), a_id, resn, chain, res_id, icode, float(ses), float(sas))
return data
def faces(self, path=None):
if path:
face_file = pl.Path(path)
else:
face_file = self.path['msms_face']
with open(face_file.as_posix()) as f:
# discard first two lines
f.next()
f.next()
n_faces, n_spheres, dens, probe_r = f.next().split()
data = np.zeros(int(n_faces), dtype=[('vertices', np.int,3), ('type', np.int8),
('f_no', np.int)])
for i,face in enumerate(f):
face_data = map(int, face.strip().split())
data[i] = (tuple(face_data[:3]), face_data[3], face_data[4])
return data
def vertices(self, path=None):
if path:
vertex_file = pl.Path(path)
else:
vertex_file = self.path['msms_vertex']
with open(vertex_file.as_posix()) as f:
# discard first two lines
f.next()
f.next()
n_vertex, n_spheres, dens, probe_r = f.next().split()
data = np.zeros(int(n_vertex), dtype=[('xyz', np.float, 3), ('normal', np.float, 3),
('face_id', np.int), ('sph_idx', np.int), ('type', np.int8),
('atom', 'S4'), ('resn', 'S3'), ('chain', 'S1'), ('resi', np.int),
('icode', 'S1')])
for i,line in enumerate(f):
vertex = line.split()
try:
coords = tuple(map(float, vertex[:3]))
except:
print vertex[:3]
print i
break
normal = tuple(map(float, vertex[3:6]))
info = tuple(map(int, vertex[6:9]))
res_str = vertex[9].split('_')
try:
res_id = int(res_str[3])
icode = ' '
except ValueError:
res_id = int(res_str[3][:-1])
icode = res_str[3][-1]
res_info = res_str[0], res_str[1], res_str[2], res_id, icode
data[i] = (coords, normal) + info + res_info
return data
def get_residue_hydro_levels(self, parameters_file=None):
'''Read in the clogP values of all residues from the
parameter file and sort all members into sets hydrophobic
and hydrophilic particles.
'''
if parameters_file and pl.Path(parameters_file).exists():
path = pl.Path(parameters_file)
else:
path = self.own_path.joinpath('../parameters/clogP.h5')
store = pd.HDFStore(path.as_posix())
hy = store['clogP']
store.close()
hydrophobic = hy[hy>0]
hydrophilic = hy[hy<0]
self.hydrophobic = set(hydrophobic.index)
self.hydrophilic = set(hydrophilic.index)
def create_residues(self, parent, surface_resis):
for res_id in surface_resis:
self.residues[res_id] = Residue(parent, res_id)
if res_id[0] in self.hydrophilic:
self.phil_resis[res_id] = self.residues[res_id]
else:
self.phob_resis[res_id] = self.residues[res_id]
def add_area(self, areas):
'''add area information to residue and
delete the residue, if it has an area of 0.0.
'''
for res_id, area in areas.items():
if area > 0.0:
self.residues[res_id].area = area
else:
del self.residues[res_id]
if res_id in self.phob_resis:
del self.phob_resis[res_id]
else:
del self.phil_resis[res_id]
def add_vertices(self, vertices):
for vert in vertices:
res_id = (vert[-4], vert[-3], vert[-2], vert[-1])
# there are corner cases, where there is no area
# but vertices. In case the residue has no area,
# we discard the vertices.
if res_id in self.residues:
self.residues[res_id].vertices.append(vert[0])
def resi_surface(self, area_data):
res_area = col.defaultdict(float)
for atom in area_data:
res_id = (atom[2], atom[3], atom[4], atom[5])
res_area[res_id] += atom[7]
return res_area
def calculate_distances(self):
for residue in self.phob_resis.values():
residue.distance_to(self.phil_resis)
def add_neighbors(self):
for residue in self.phob_resis.values():
residue.neighboring_resis(self.phob_resis)
def additive_area(self):
for residue in self.phob_resis.values():
residue.additive_area()
def complete(self):
self.get_residue_hydro_levels()
self.msms_surface()
area = self.resi_surface(self.area())
self.create_residues(self, area.keys())
self.add_area(area)
vertices = self.vertices()
self.add_vertices(vertices)
self.calculate_distances()
self.add_neighbors()
self.additive_area()
def to_numpy_array(self, threshold=0.0):
arr_type = [('resname', 'S3'), ('chain', 'S1'), ('id', np.int16),
('iCode', 'S1'), ('singleParameter', np.float16), ('areaParameter', np.float16)]
hydrophobic_array = np.zeros(len(self.phob_resis), dtype=arr_type)
for i,resi in enumerate(self.phob_resis.values()):
hydrophobic_array[i] = (resi.name[0], resi.pdb_id[0], resi.pdb_id[1], resi.pdb_id[2],
resi.hydrophobic_energy_single(), resi.hydrophobic_energy_area())
mask = hydrophobic_array['singleParameter'] > threshold
return hydrophobic_array[mask]
def to_hdf5(self, db_path, table_name, threshold=0.1):
'''Save residue info and parameters to HDF5 Database.
'''
hydrophobic_array = self.to_numpy_array(threshold)
dBase = DB.Database(db_path)
dBase._save_table(hydrophobic_array, '/', table_name)
dBase.close()
def to_dataframe(self, threshold=0.0):
hydrophobic_array = self.to_numpy_array(threshold)
df = pd.Dataframe(data=hydrophobic_array)
return df
class Residue(object):
    """One surface residue and its MSMS-derived surface data.

    ``name`` is the residue identifier handed in by the parent
    parameterisation -- presumably a (resn, chain, resi, iCode) tuple,
    given how ``set_pdb_id`` compares ``name[1:]`` against
    (chain, resseq, iCode) -- TODO confirm against the caller.
    """
    def __init__(self, parent, name):
        self.name = name
        self.parent = parent                  # owning parameterisation object
        self.clogP = None                     # hydrophobicity value; not set here
        self.min_radius = -1
        self.neighbors = set([])              # nearby hydrophobic Residue objects
        self.area = -1                        # own surface area; -1 = unset
        self.surrounding_area = -1            # own area + neighbors' areas
        self.vertices = []                    # surface vertex coordinates
        self.distance = -1                    # distance to closest residue of the
                                              # other hydropathy group; -1 = unset
        self.pdb_id = None                    # (chain, resseq, iCode), see set_pdb_id
        self.set_pdb_id()
    def center(self):
        # Geometric mean of all surface vertices of this residue.
        return np.array(self.vertices).mean(axis=0)
    def distance_to(self, group):
        # Cache and return the distance to the closest vertex in `group`.
        self.distance = self.closest_of(group)
        return self.distance
    def closest_of(self, group):
        # Minimal distance from this residue's center to any surface vertex
        # of any residue in `group`. 1000.0 acts as a "larger than any
        # realistic protein" starting value.
        min_dist = 1000.0
        res_center = self.center()
        for residue in group.values():
            for vertex in residue.vertices:
                dist = np.linalg.norm(res_center - vertex)
                if dist < min_dist:
                    min_dist = dist
        return min_dist
    def neighboring_resis(self, hphob_resis):
        # Collect all other hydrophobic residues that have at least one
        # surface vertex closer to this residue's center than the cached
        # distance to the hydrophilic surface.
        if self.distance < 0:
            raise ValueError('calculate distance first!')
        center = self.center()
        for residue_id, hphob_resi in filter(lambda x:x[0]!=self.name, hphob_resis.items()):
            for vert in hphob_resi.vertices:
                if np.linalg.norm(vert-center) < self.distance:
                    self.neighbors.add(hphob_resi)
                    break  # one vertex within range suffices
    def additive_area(self):
        # Own area plus the areas of all neighbors found above.
        if self.area < 0:
            raise ValueError('Set area first.')
        self.surrounding_area = self.area
        for neighbor in self.neighbors:
            self.surrounding_area += neighbor.area
    def hydrophobic_energy_single(self):
        '''Hydrophobic energy from this residue's own surface area.

        See: Reynolds1974
        '''
        return self.parent.correlation_parameter * self.area
    def hydrophobic_energy_area(self):
        '''Hydrophobic energy from the neighborhood surface area.

        See: Reynolds1974
        '''
        return self.parent.correlation_parameter * self.surrounding_area
    def set_pdb_id(self):
        # Find the unique Bio.PDB residue whose (chain, resseq, iCode)
        # matches name[1:]; anything but exactly one match is an error.
        matches = []
        for pdb_resi in self.parent.struc[0].get_residues():
            pdb_id = self.reduced_id(pdb_resi.get_full_id())
            if self.name[1:] == pdb_id:
                matches.append(self.reduced_id(pdb_resi.get_full_id()))
        if len(matches) !=1:
            raise ValueError('Amino Acid %s was not matched to a single pdb residue but %s' % (self.name, matches))
        self.pdb_id = matches[0]
    @staticmethod
    def reduced_id(full_id):
        # Bio.PDB full_id = (structure, model, chain, (hetfield, resseq, icode));
        # keep only (chain, resseq, icode).
        return (full_id[2], full_id[3][1], full_id[3][2])
| mit |
chrisburr/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    """Our f_oneway must agree with the scipy.stats reference version."""
    random_state = np.random.RandomState(0)
    sample_a = random_state.randn(10, 3)
    sample_b = 1 + random_state.randn(10, 3)
    expected_f, expected_p = stats.f_oneway(sample_a, sample_b)
    actual_f, actual_p = f_oneway(sample_a, sample_b)
    assert_true(np.allclose(expected_f, actual_f))
    assert_true(np.allclose(expected_p, actual_p))
def test_f_oneway_ints():
    """Smoke test: integer inputs must not raise casting errors on recent
    numpys and must agree with the float computation to 4 decimals."""
    rng = np.random.RandomState(0)
    features = rng.randint(10, size=(10, 10))
    labels = np.arange(10)
    f_int, p_int = f_oneway(features, labels)
    # Same call on a float copy serves as the reference.
    f_float, p_float = f_oneway(features.astype(np.float), labels)
    assert_array_almost_equal(f_float, f_int, decimal=4)
    assert_array_almost_equal(p_float, p_int, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
    """k=0 must select no features and warn when transforming."""
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    # Nothing is supported ...
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))
    # ... and transforming emits a UserWarning with an empty result.
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      selector.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    """Check that the fitted selector kept exactly its highest scores."""
    all_scores = score_filter.scores_
    mask = score_filter.get_support()
    kept_sorted = np.sort(all_scores[mask])
    top_sorted = np.sort(all_scores)[-mask.sum():]
    assert_array_equal(kept_sorted, top_sorted)
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
    """SelectKBest must sort stably when chi2 scores are tied."""
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for k in (1, 2, 3):
        selector = SelectKBest(chi2, k=k)
        selector.fit(X_train, y_train)
        transformed = selector.transform([[0, 1, 2]])
        # Stable sort keeps the last k columns for all-tied scores.
        assert_array_equal(transformed[0], np.arange(3)[-k:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
    """k outside [0, n_features] must raise ValueError in both APIs."""
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for bad_k in (-1, 4):  # below zero / above n_features
        assert_raises(ValueError, SelectKBest(k=bad_k).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='k_best', param=bad_k).fit,
                      X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        # Fitted attributes; remain None until fit() succeeds.
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            if issparse(X):
                X.sort_indices()
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                # Keep the original labels around: compute_sample_weight needs
                # them, while the tree builder works on encoded indices.
                y_original = np.copy(y)
            # BUGFIX: ``np.int`` is a deprecated alias removed in NumPy 1.24;
            # use the platform index type np.intp (consistent with the
            # n_classes_ array built below).
            y_store_unique_indices = np.zeros(y.shape, dtype=np.intp)
            for k in range(self.n_outputs_):
                classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_store_unique_indices
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # "auto" is sqrt for classification but all features for
                # regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))
        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        if expanded_class_weight is not None:
            # Class weights combine multiplicatively with explicit sample
            # weights when both are given.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        # Set min_samples_split sensibly: a split must be able to produce two
        # leaves that each satisfy min_samples_leaf.
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                min_weight_leaf,
                                                random_state)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            self.min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           self.min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)
        builder.build(self.tree_, X, y, sample_weight)
        # For single-output problems expose scalars/arrays instead of
        # one-element lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))
        return X
    def predict(self, X, check_input=True):
        """Predict class or regression value for X.
        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]
        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # argmax over class-count columns, mapped back to labels.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]
    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
            - If int, then consider `max_features` features at each split.
            - If float, then `max_features` is a percentage and
              `int(max_features * n_features)` features are considered at each
              split.
            - If "auto", then `max_features=sqrt(n_features)`.
            - If "sqrt", then `max_features=sqrt(n_features)`.
            - If "log2", then `max_features=log2(n_features)`.
            - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional
                   (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeRegressor
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Pure pass-through to BaseDecisionTree; all fitting logic lives there.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.
        The predicted class probability is the fraction of samples of the same
        class in a leaf.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            # Normalize leaf value counts into per-row fractions; rows that
            # sum to zero are divided by 1 instead, to avoid 0/0.
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # Multi-output: one normalized probability array per output.
            all_proba = []
            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Take the log of each output's probability array in turn.
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
            - If int, then consider `max_features` features at each split.
            - If float, then `max_features` is a percentage and
              `int(max_features * n_features)` features are considered at each
              split.
            - If "auto", then `max_features=n_features`.
            - If "sqrt", then `max_features=sqrt(n_features)`.
            - If "log2", then `max_features=log2(n_features)`.
            - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeClassifier
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure pass-through to BaseDecisionTree (no class_weight for
        # regression); all fitting logic lives in the base class.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Identical to DecisionTreeClassifier except for the defaults:
        # splitter="random" and max_features="auto" give the extra-random
        # behavior described in the class docstring.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor except for the defaults:
        # splitter="random" and max_features="auto".
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| bsd-3-clause |
idealabasu/code_pynamics | python/pynamics_examples/in_development/pendulum_script_mode.py | 1 | 2823 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output
from pynamics.particle import Particle
import pynamics.integration
#import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
tol = 1e-10
lA = Constant(1)
mA = Constant(1)
g = Constant(9.81)
b_air = Constant(1e-1)
b_joint = Constant(1e-1)
k = Constant(2e1)
Ixx_A = Constant(1)
Iyy_A = Constant(1)
Izz_A = Constant(1)
tinitial = 0
tfinal = 5
tstep = .001
t = numpy.r_[tinitial:tfinal:tstep]
preload1 = Constant(0*pi/180)
qA,qA_d,qA_dd = Differentiable('qA',system)
initialvalues = {}
initialvalues[qA]=0*pi/180
initialvalues[qA_d]=0*pi/180
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
N = Frame('N')
A = Frame('A')
system.set_newtonian(N)
A.rotate_fixed_axis_directed(N,[0,0,1],qA,system)
pNA=0*N.x
pAB=pNA+lA*A.x
vAB=pAB.time_derivative(N,system)
#ParticleA = Particle(pAB,mA,'ParticleA',system)
IA = Dyadic.build(A,Ixx_A,Iyy_A,Izz_A)
BodyA = Body('BodyA',A,pAB,mA,IA,system)
wNA = N.getw_(A)
lab2 = vAB.dot(vAB)
uab = vAB * (1/(lab2**.5+tol))
#squared term
#system.addforce(-b_air*lab2*uab,vAB)
#linear term
system.addforce(-b_air*vAB,vAB)
system.addforce(-b_joint*wNA,wNA)
system.addforcegravity(-g*N.y)
system.add_spring_force1(k,(qA-preload1)*N.z,wNA)
#x1 = ParticleA.pCM.dot(N.x)
#y1 = ParticleA.pCM.dot(N.y)
x1 = BodyA.pCM.dot(N.x)
y1 = BodyA.pCM.dot(N.y)
f,ma = system.getdynamics()
func = system.state_space_post_invert(f,ma)
states=pynamics.integration.integrate_odeint(func,ini,t,rtol=1e-12,atol=1e-12,hmin=1e-14, args=({'constants':system.constant_values},))
KE = system.get_KE()
PE = system.getPEGravity(pNA) - system.getPESprings()
output = Output([x1,y1,KE-PE,qA],system)
y = output.calc(states)
plt.figure(1)
plt.plot(y[:,0],y[:,1])
plt.axis('equal')
plt.figure(2)
plt.plot(y[:,2])
plt.figure(3)
plt.plot(t,y[:,0])
plt.show()
import numpy.random
f = f[0].simplify()
ma = ma[0].simplify()
q = y[:,-1].astype(float)
q += numpy.random.rand(len(q))*1e-6
q_d = (q[2:]-q[:-2])/(2*tstep)
q_dd = (q_d[2:]-q_d[:-2])/(2*tstep)
q = q[2:-2]
t = t[2:-2]
q_d = q_d[1:-1]
plt.figure()
plt.plot(t,q)
plt.figure()
plt.plot(t,q_d)
plt.figure()
plt.plot(t,q_dd)
x = numpy.c_[q,numpy.cos(q),q_d]
m = float((ma/qA_dd).subs(system.constant_values))
y = m*q_dd
C = numpy.linalg.solve(x.T.dot(x),x.T.dot(y))
y2 = numpy.r_[[C]].dot(x.T).T
plt.figure()
plt.plot(t,y)
plt.plot(t,y2)
| mit |
RMKD/networkx | networkx/convert.py | 22 | 13215 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
    """Make a NetworkX graph from a known data structure.
    The preferred way to call this is automatically
    from the class constructor
    >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
    >>> G=nx.Graph(d)
    instead of the equivalent
    >>> G=nx.from_dict_of_dicts(d)
    Parameters
    ----------
    data : a object to be converted
       Current known types are:
         any NetworkX graph
         dict-of-dicts
         dist-of-lists
         list of edges
         numpy matrix
         numpy ndarray
         scipy sparse matrix
         pygraphviz agraph
    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.
    multigraph_input : bool (default False)
      If True and  data is a dict_of_dicts,
      try to create a multigraph assuming dict_of_dict_of_lists.
      If data and create_using are both multigraphs then create
      a multigraph from a multigraph.
    """
    # The broad ``except:`` blocks below are deliberate: each conversion is a
    # best-effort attempt that is translated into a descriptive NetworkXError
    # (or TypeError) when the input does not match the guessed format.
    # NX graph
    # Duck-typing: anything with an ``adj`` attribute is treated as a
    # NetworkX-like graph; graph/node metadata dicts are shallow-copied.
    if hasattr(data,"adj"):
        try:
            result= from_dict_of_dicts(data.adj,\
                    create_using=create_using,\
                    multigraph_input=data.is_multigraph())
            if hasattr(data,'graph') and isinstance(data.graph,dict):
                result.graph=data.graph.copy()
            if hasattr(data,'node') and isinstance(data.node,dict):
                result.node=dict( (n,dd.copy()) for n,dd in data.node.items() )
            return result
        except:
            raise nx.NetworkXError("Input is not a correct NetworkX graph.")
    # pygraphviz  agraph
    # ``is_strict`` is an attribute specific to pygraphviz AGraph objects.
    if hasattr(data,"is_strict"):
        try:
            return nx.from_agraph(data,create_using=create_using)
        except:
            raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
    # dict of dicts/lists
    # Try dict-of-dicts first, then fall back to dict-of-lists.
    if isinstance(data,dict):
        try:
            return from_dict_of_dicts(data,create_using=create_using,\
                    multigraph_input=multigraph_input)
        except:
            try:
                return from_dict_of_lists(data,create_using=create_using)
            except:
                raise TypeError("Input is not known type.")
    # list or generator of edges
    # ``next``/``__next__`` cover Python 2 and 3 iterator protocols.
    if (isinstance(data,list)
        or isinstance(data,tuple)
        or hasattr(data,'next')
        or hasattr(data, '__next__')):
        try:
            return from_edgelist(data,create_using=create_using)
        except:
            raise nx.NetworkXError("Input is not a valid edge list")
    # Pandas DataFrame
    # Optional dependencies below are probed with import; a missing package
    # just skips that conversion attempt with an ImportWarning.
    try:
        import pandas as pd
        if isinstance(data, pd.DataFrame):
            try:
                return nx.from_pandas_dataframe(data, create_using=create_using)
            except:
                msg = "Input is not a correct Pandas DataFrame."
                raise nx.NetworkXError(msg)
    except ImportError:
        msg = 'pandas not found, skipping conversion test.'
        warnings.warn(msg, ImportWarning)
    # numpy matrix or ndarray
    try:
        import numpy
        if isinstance(data,numpy.matrix) or \
               isinstance(data,numpy.ndarray):
            try:
                return nx.from_numpy_matrix(data,create_using=create_using)
            except:
                raise nx.NetworkXError(\
                  "Input is not a correct numpy matrix or array.")
    except ImportError:
        warnings.warn('numpy not found, skipping conversion test.',
                      ImportWarning)
    # scipy sparse matrix - any format
    # Duck-typing: scipy sparse matrices expose a ``format`` attribute.
    try:
        import scipy
        if hasattr(data,"format"):
            try:
                return nx.from_scipy_sparse_matrix(data,create_using=create_using)
            except:
                raise nx.NetworkXError(\
                      "Input is not a correct scipy sparse matrix type.")
    except ImportError:
        warnings.warn('scipy not found, skipping conversion test.',
                      ImportWarning)
    raise nx.NetworkXError(\
          "Input is not a known data type for conversion.")
    # NOTE(review): this bare ``return`` is unreachable (the raise above
    # always fires); kept for byte-compatibility with the original.
    return
def convert_to_undirected(G):
    """Return a new undirected representation of the graph G."""
    # Delegate entirely to the graph's own conversion method.
    undirected_graph = G.to_undirected()
    return undirected_graph
def convert_to_directed(G):
    """Return a new directed representation of the graph G."""
    # Delegate entirely to the graph's own conversion method.
    directed_graph = G.to_directed()
    return directed_graph
def to_dict_of_lists(G,nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    nodelist : list
       Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist = G
    # For every requested node keep only the neighbors that are themselves
    # inside nodelist, so the result is closed under the node selection.
    return dict((node, [nbr for nbr in G.neighbors(node) if nbr in nodelist])
                for node in nodelist)
def from_dict_of_lists(d,create_using=None):
    """Return a graph from a dictionary of lists.

    Parameters
    ----------
    d : dictionary of lists
      A dictionary of lists adjacency representation.
    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> dol= {0:[1]} # single edge (0,1)
    >>> G=nx.from_dict_of_lists(dol)

    or

    >>> G=nx.Graph(dol) # use Graph constructor
    """
    G = _prep_create_using(create_using)
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # A dict-of-lists cannot express parallel edges, but every undirected
        # edge appears twice in it (u->v and v->u).  Remember which source
        # nodes are already fully processed so the reverse copy of each edge
        # is not inserted a second time.
        completed = set()
        for source, neighbor_list in d.items():
            for target in neighbor_list:
                if target not in completed:
                    G.add_edge(source, target)
            completed.add(source)
    else:
        edge_iter = ((source, target)
                     for source, neighbor_list in d.items()
                     for target in neighbor_list)
        G.add_edges_from(edge_iter)
    return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
    """Return adjacency representation of graph as a dictionary of dictionaries.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    nodelist : list
       Use only nodes specified in nodelist
    edge_data : list, optional
       If provided, the value of the dictionary will be
       set to edge_data for all edges.  This is useful to make
       an adjacency matrix type representation with 1 as the edge data.
       If edgedata is None, the edgedata in G is used to fill the values.
       If G is a multigraph, the edgedata is a dict for each pair (u,v).
    """
    dod = {}
    if nodelist is None:
        if edge_data is None:
            # Copy each neighbor dict so callers cannot mutate G through it.
            for node, nbrdict in G.adjacency_iter():
                dod[node] = nbrdict.copy()
        else:
            # Same adjacency keys, but every value is the constant edge_data.
            for node, nbrdict in G.adjacency_iter():
                dod[node] = dod.fromkeys(nbrdict, edge_data)
    else:
        if edge_data is None:
            for node in nodelist:
                dod[node] = dict((nbr, dd) for nbr, dd in G[node].items()
                                 if nbr in nodelist)
        else:
            for node in nodelist:
                dod[node] = dict((nbr, edge_data) for nbr in G[node]
                                 if nbr in nodelist)
    return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
    """Return a graph from a dictionary of dictionaries.
    Parameters
    ----------
    d : dictionary of dictionaries
      A dictionary of dictionaries adjacency representation.
    create_using : NetworkX graph
       Use specified graph for result. Otherwise a new graph is created.
    multigraph_input : bool (default False)
       When True, the values of the inner dict are assumed
       to be containers of edge data for multiple edges.
       Otherwise this routine assumes the edge data are singletons.
    Examples
    --------
    >>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
    >>> G=nx.from_dict_of_dicts(dod)
    or
    >>> G=nx.Graph(dod) # use Graph constructor
    """
    G=_prep_create_using(create_using)
    G.add_nodes_from(d)
    # is dict a MultiGraph or MultiDiGraph?
    if multigraph_input:
        # make a copy of the list of edge data (but not the edge data)
        if G.is_directed():
            if G.is_multigraph():
                # keyed parallel edges transfer directly as (u, v, key, data)
                G.add_edges_from( (u,v,key,data)
                                  for u,nbrs in d.items()
                                  for v,datadict in nbrs.items()
                                  for key,data in datadict.items()
                                )
            else:
                # non-multigraph target: parallel edges collapse, the last
                # (key, data) pair seen for each (u, v) wins
                G.add_edges_from( (u,v,data)
                                  for u,nbrs in d.items()
                                  for v,datadict in nbrs.items()
                                  for key,data in datadict.items()
                                )
        else: # Undirected
            if G.is_multigraph():
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.items():
                    for v,datadict in nbrs.items():
                        # (v, u) is recorded below, so the mirrored entry of
                        # this edge bundle is skipped when it comes around
                        if (u,v) not in seen:
                            G.add_edges_from( (u,v,key,data)
                                              for key,data in datadict.items()
                                            )
                            seen.add((v,u))
            else:
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.items():
                    for v,datadict in nbrs.items():
                        if (u,v) not in seen:
                            G.add_edges_from( (u,v,data)
                                              for key,data in datadict.items() )
                            seen.add((v,u))
    else: # not a multigraph to multigraph transfer
        if G.is_multigraph() and not G.is_directed():
            # d can have both representations u-v, v-u in dict.  Only add one.
            # We don't need this check for digraphs since we add both directions,
            # or for Graph() since it is done implicitly (parallel edges not allowed)
            seen=set()
            for u,nbrs in d.items():
                for v,data in nbrs.items():
                    if (u,v) not in seen:
                        G.add_edge(u,v,attr_dict=data)
                        seen.add((v,u))
        else:
            G.add_edges_from( ( (u,v,data)
                                for u,nbrs in d.items()
                                for v,data in nbrs.items()) )
    return G
def to_edgelist(G,nodelist=None):
    """Return a list of edges in the graph.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    nodelist : list
       Use only nodes specified in nodelist
    """
    # Edge data is always included; restrict to nodelist when one is given.
    if nodelist is None:
        return G.edges(data=True)
    return G.edges(nodelist, data=True)
def from_edgelist(edgelist,create_using=None):
    """Return a graph built from a list of edges.

    Parameters
    ----------
    edgelist : list or iterator
      Edge tuples
    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> edgelist= [(0,1)] # single edge (0,1)
    >>> G=nx.from_edgelist(edgelist)

    or

    >>> G=nx.Graph(edgelist) # use Graph constructor
    """
    graph = _prep_create_using(create_using)
    graph.add_edges_from(edgelist)
    return graph
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/util/doctools.py | 9 | 6779 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
    """
    Layout some DataFrames in vertical/horizontal layout for explanation.
    Used in merging.rst
    """

    def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
        # Geometry of a single table cell (in inches) plus the font size used
        # for both cell text and axis titles.
        self.cell_width = cell_width
        self.cell_height = cell_height
        self.font_size = font_size

    def _shape(self, df):
        """Calculate table shape considering index levels"""
        row, col = df.shape
        # Each column level adds a header row and each index level adds a
        # leading column when the frame is rendered as a table.
        return row + df.columns.nlevels, col + df.index.nlevels

    def _get_cells(self, left, right, vertical):
        """Calculate appropriate figure size based on left and right data"""
        if vertical:
            # calculate required number of cells
            vcells = max(sum([self._shape(l)[0] for l in left]),
                         self._shape(right)[0])
            hcells = (max([self._shape(l)[1] for l in left]) +
                      self._shape(right)[1])
        else:
            vcells = max([self._shape(l)[0] for l in left] +
                         [self._shape(right)[0]])
            hcells = sum([self._shape(l)[1] for l in left] +
                         [self._shape(right)[1]])
        return hcells, vcells

    def plot(self, left, right, labels=None, vertical=True):
        """
        Plot left / right DataFrames in specified layout.
        Parameters
        ----------
        left : list of DataFrames before operation is applied
        right : DataFrame of operation result
        labels : list of str to be drawn as titles of left DataFrames
        vertical : bool
            If True, use vertical layout. If False, use horizontal layout.

        NOTE(review): despite the default, labels=None will raise a TypeError
        in the zip() calls below -- callers appear to always pass labels.
        Confirm before relying on the default.
        """
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec

        if not isinstance(left, list):
            left = [left]
        left = [self._conv(l) for l in left]
        right = self._conv(right)

        hcells, vcells = self._get_cells(left, right, vertical)

        if vertical:
            figsize = self.cell_width * hcells, self.cell_height * vcells
        else:
            # include margin for titles
            # NOTE(review): both branches compute the same figsize even though
            # this comment suggests extra title margin -- verify intent.
            figsize = self.cell_width * hcells, self.cell_height * vcells
        fig = plt.figure(figsize=figsize)

        if vertical:
            gs = gridspec.GridSpec(len(left), hcells)
            # left: one grid row per input frame, all sharing the left columns
            max_left_cols = max([self._shape(l)[1] for l in left])
            max_left_rows = max([self._shape(l)[0] for l in left])
            for i, (l, label) in enumerate(zip(left, labels)):
                ax = fig.add_subplot(gs[i, 0:max_left_cols])
                self._make_table(ax, l, title=label,
                                 height=1.0 / max_left_rows)
            # right: result table spans all grid rows
            ax = plt.subplot(gs[:, max_left_cols:])
            self._make_table(ax, right, title='Result', height=1.05 / vcells)
            fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
        else:
            max_rows = max([self._shape(df)[0] for df in left + [right]])
            height = 1.0 / np.max(max_rows)
            gs = gridspec.GridSpec(1, hcells)
            # left: frames placed side by side, i tracks the next free column
            i = 0
            for l, label in zip(left, labels):
                sp = self._shape(l)
                ax = fig.add_subplot(gs[0, i:i + sp[1]])
                self._make_table(ax, l, title=label, height=height)
                i += sp[1]
            # right: result occupies the remaining columns
            ax = plt.subplot(gs[0, i:])
            self._make_table(ax, right, title='Result', height=height)
            fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
        return fig

    def _conv(self, data):
        """Convert each input to a DataFrame suitable for table output"""
        if isinstance(data, pd.Series):
            # Series are promoted to single-column frames; an unnamed Series
            # gets an empty column header.
            if data.name is None:
                data = data.to_frame(name='')
            else:
                data = data.to_frame()
        # Render missing values as the literal string 'NaN' in the table.
        data = data.fillna('NaN')
        return data

    def _insert_index(self, data):
        # insert is destructive, so work on a copy
        data = data.copy()
        idx_nlevels = data.index.nlevels
        if idx_nlevels == 1:
            data.insert(0, 'Index', data.index)
        else:
            # one 'IndexN' column per index level, in level order
            for i in range(idx_nlevels):
                data.insert(i, 'Index{0}'.format(i),
                            data.index.get_level_values(i))

        col_nlevels = data.columns.nlevels
        if col_nlevels > 1:
            # Flatten a MultiIndex header: level 0 stays as the column names,
            # the remaining levels are prepended as extra header rows.
            col = data.columns.get_level_values(0)
            values = [data.columns.get_level_values(i).values
                      for i in range(1, col_nlevels)]
            col_df = pd.DataFrame(values)
            data.columns = col_df.columns
            data = pd.concat([col_df, data])
            data.columns = col
        return data

    def _make_table(self, ax, df, title, height=None):
        # Draw df as a matplotlib table on ax; a None frame blanks the axes.
        if df is None:
            ax.set_visible(False)
            return

        import pandas.tools.plotting as plotting

        idx_nlevels = df.index.nlevels
        col_nlevels = df.columns.nlevels
        # must be converted here to get index levels for colorization
        df = self._insert_index(df)
        tb = plotting.table(ax, df, loc=9)
        tb.set_fontsize(self.font_size)

        if height is None:
            height = 1.0 / (len(df) + 1)
        props = tb.properties()
        for (r, c), cell in compat.iteritems(props['celld']):
            if c == -1:
                # hide the automatic row-label column
                cell.set_visible(False)
            elif r < col_nlevels and c < idx_nlevels:
                # hide the empty top-left corner cells
                cell.set_visible(False)
            elif r < col_nlevels or c < idx_nlevels:
                # shade header rows and index columns grey
                cell.set_facecolor('#AAAAAA')
            cell.set_height(height)

        ax.set_title(title, size=self.font_size)
        ax.axis('off')
if __name__ == "__main__":
    # Visual smoke tests for TablePlotter; each plot() opens a window.
    import matplotlib.pyplot as plt

    p = TablePlotter()

    # vertical layout: two frames stacked next to their concatenation
    df1 = pd.DataFrame({'A': [10, 11, 12],
                        'B': [20, 21, 22],
                        'C': [30, 31, 32]})
    df2 = pd.DataFrame({'A': [10, 12],
                        'C': [30, 32]})
    p.plot([df1, df2], pd.concat([df1, df2]),
           labels=['df1', 'df2'], vertical=True)
    plt.show()

    # horizontal layout: column-wise concatenation of disjoint columns
    df3 = pd.DataFrame({'X': [10, 12],
                        'Z': [30, 32]})
    p.plot([df1, df3], pd.concat([df1, df3], axis=1),
           labels=['df1', 'df2'], vertical=False)
    plt.show()

    # MultiIndex rows and columns exercise the header-flattening path
    idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
                                     (2, 'A'), (2, 'B'), (2, 'C')])
    col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
    df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
                        'v2': [5, 6, 7, 8, 9, 10]},
                       index=idx)
    df3.columns = col
    p.plot(df3, df3, labels=['df3'])
    plt.show()
| mit |
robmarano/nyu-python | course-2/session-7/pandas/df_basics.py | 1 | 2677 | #!/usr/bin/env python3
# Demo script: build a DataFrame from an in-memory CSV string, derive a
# normalized date column, filter to a 30-day window, and plot a per-day
# histogram of request counts.
try:
    # for Python 2.x
    import StringIO
except ImportError:
    # for Python 3.x (the StringIO module no longer exists)
    import io
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re

# define data
csv_input = """timestamp,title,reqid
2016-07-23 11:05:08,SVP,2356556-AS
2016-12-12 01:23:33,VP,5567894-AS
2016-09-13 12:43:33,VP,3455673-AS
2016-09-13 19:43:33,EVP,8455673-AS
2016-09-30 11:43:33,VP,9455673-AS
2016-08-02 01:23:33,VP,5698765-AS
2016-04-22 01:23:33,VP,1234556-AS
"""

# load data
try:
    # for Python 2.x
    f = StringIO.StringIO(csv_input)
except NameError:
    # for Python 3.x: the StringIO import above failed, so the name is
    # undefined here (the original bare ``except:`` masked this detail)
    f = io.StringIO(csv_input)

# echo the raw rows, tab-separated
reader = csv.reader(f, delimiter=',')
for row in reader:
    print('\t'.join(row))

# reset file pointer position to beginning of file
f.seek(0)

# create pandas dataframe
df = pd.read_csv(f)
print(df.head())
print(df.info())
print(df)

# normalize() strips the time-of-day, leaving midnight timestamps
df['date'] = pd.DatetimeIndex(df.timestamp).normalize()
print(df)
print(df.index)

# the raw timestamp string column is no longer needed
df.drop('timestamp', axis=1, inplace=True)

print(type(df['date'][0]))

df.sort_values(by='date', axis=0, ascending=True, inplace=True)
print(df)
df['weekday'] = df['date'].apply(lambda x: x.dayofweek)

# setup date processing: a 30-day window ending at now_string's date
now_string = '2016-10-01 08:01:20'
past_by_days = 30
time_delta = pd.to_timedelta('{} days'.format(past_by_days))
print(time_delta)
now = pd.Timestamp(now_string)
now_norm = now.normalize()
print(now_norm)
now_start = now_norm - time_delta
print(now_start)

# process: keep only observations inside [now_start, now_norm]
ddf = df.loc[((df['date'] >= now_start) & (df['date'] <= now_norm))]
print(ddf)
print('number of observations found in filtered df = {}'.format(len(ddf)))
print(len(ddf.columns))

# histogram of number of observations by date
df_grouped_date = df.groupby(['date'])
df_date_count = df_grouped_date['reqid'].aggregate(['count'])
print(df_date_count)
# .loc replaces the long-deprecated (and now removed) .ix indexer
df_date_count.loc[:, df_date_count.columns].plot(kind='bar')
plt.legend(loc='best').get_texts()[0].set_text('Reqs Added Per Day')
file_name = 'myBar'
# raw string: '\s' in a plain literal is an invalid escape sequence
file_name = re.sub(r'\s+', '_', file_name)
plt.savefig(file_name)
plt.show()
| mit |
nmayorov/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# 500x500 evaluation grid over [-3, 3] x [-3, 3]
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
# target: XOR of the signs of the two input coordinates
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUG FIX: matplotlib's contour() takes ``linestyles``; the original passed
# the invalid keyword ``linetypes``, which newer matplotlib rejects.
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
Nyker510/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Returns the number of non-zero columns in a CSR matrix X."""
    # nonzero() yields parallel (row, col) index arrays; count the distinct
    # column indices that carry at least one stored non-zero.
    _, col_indices = X.nonzero()
    return np.unique(col_indices).shape[0]
def tokens(doc):
    """Extract tokens from doc.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    # findall runs eagerly (as in the original); lower-casing stays lazy.
    words = re.findall(r"\w+", doc)
    return (word.lower() for word in words)
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies."""
    # Same tokenization as tokens() (inlined): \w+ runs, lower-cased.
    freq = defaultdict(int)
    for word in re.findall(r"\w+", doc):
        freq[word.lower()] += 1
    return freq
# Newsgroup categories used for the (smaller) default benchmark corpus.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()

# Optional CLI argument: the FeatureHasher output dimensionality.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer over precomputed token-frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher over the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher hashing raw token streams directly.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Public API of sklearn.datasets: every loader, fetcher and synthetic-data
# generator re-exported above.  Keep this list in sync with the imports.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'fetch_kddcup99',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_breast_cancer',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| unlicense |
ma-compbio/SPEID | pairwise/read_FIMO_results.py | 2 | 5408 | import numpy as np
import csv
from sklearn.metrics import average_precision_score
from keras.optimizers import Adam # needed to compile prediction model
import h5py
import load_data_pairs as ld # my own scripts for loading data
import build_small_model as bm
import util
# Root of the FIMO motif-scan output and the one-hot encoded sequence data.
fimo_root = '/home/sss1/Desktop/projects/DeepInteractions/pairwise/FIMO/'
data_path = '/home/sss1/Desktop/projects/DeepInteractions/data/uniform_len/original/all_data.h5'
# Cell lines and sequence classes processed by the main loop below.
cell_lines = ['GM12878', 'HeLa-S3', 'HUVEC', 'IMR90', 'K562', 'NHEK']
data_types = ['enhancers', 'promoters']
# num_repeats = 5 # number of i.i.d. trials to run; too slow to do :(
random_window_length = 20 # number of bp to randomize at each feature occurrence
# Randomize each appearance of the pattern pattern in the data
def randomize_window(sequence):
    """Overwrite every row of `sequence` in place with a random one-hot base.

    `sequence` is a (window_length, 4) array; each row becomes all zeros with
    a single 1 at a uniformly random position.
    """
    num_positions = np.shape(sequence)[0]
    for position in range(num_positions):
        sequence[position] = np.zeros(4)
        hot_base = np.random.randint(0, 4)
        sequence[position, hot_base] = 1
# Returns a deep copy of the data, with motif occurrences randomized out.
# A deep copy is made because this is much faster than reloading the data for
# every motif.
# data: (num_sequences X sequence_length X 4) 3-tensor of num_sequences
# one-hot encoded nucleotide sequences of equal-length sequence_length
# motifs_idxs: list of (sample_idx, start_idx, stop_idx) triples
def replace_motifs_in_data(data, motif_idxs):
    """Return a deep copy of `data` with each motif occurrence randomized out.

    A deep copy is made because this is much faster than reloading the data
    for every motif.

    Parameters
    ----------
    data : (num_sequences, sequence_length, 4) array of one-hot sequences
    motif_idxs : list of (sample_idx, start_idx, stop_idx) triples

    For each occurrence, a window of `random_window_length` bp centered on
    the motif midpoint is replaced with random one-hot bases.
    """
    data_copy = np.copy(data)
    seq_len = np.shape(data)[1]
    # BUG FIX: iterate over the `motif_idxs` parameter -- the original looped
    # over the module-level global `idxs` (leaked from the caller's for-loop),
    # which only worked by accident.
    for (sample_idx, motif_start, motif_stop) in motif_idxs:
        mid = (motif_start + motif_stop) // 2  # // keeps integer indices on py2 and py3
        start = max(0, mid - (random_window_length // 2))
        stop = min(seq_len, start + random_window_length)
        randomize_window(data_copy[sample_idx, start:stop, :])
    return data_copy
# Main loop (Python 2): for every cell line and sequence class, measure how
# much each motif contributes to model performance by randomizing its
# occurrences and re-scoring.
for cell_line in cell_lines:
    for data_type in data_types:
        fimo_path = fimo_root + cell_line + '_' + data_type + '_all_retest/fimo.txt'
        # data_path = data_root + cell_line + '/' + cell_line + '_ep_split.h5'
        matches = dict() # dict mapping motif_names to lists of (sample_idx, start_idx, stop_idx) triples
        print 'Reading and processing FIMO output...'
        with open(fimo_path, 'rb') as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            row_idx = -1
            for row in reader:
                row_idx += 1
                if row_idx == 0: # skip header row
                    continue
                motif_name = row[0]
                if not motif_name in matches: # if this is the first match of that motif
                    matches[motif_name] = []
                # FIMO columns: motif name, sequence id, match start, match stop
                sample_idx = int(row[1])
                motif_start = int(row[2])
                motif_stop = int(row[3])
                matches[motif_name].append((sample_idx, motif_start, motif_stop))
        print 'Identified ' + str(len(matches)) + ' distinct motifs.'
        print 'Loading original data...'
        # X_enhancers_original, X_promoters_original, y = ld.load_hdf5_ep_split(data_path)
        # HDF5 arrays are stored (N, 4, L); transpose to (N, L, 4) one-hot.
        with h5py.File(data_path, 'r') as hf:
            X_enhancers_original = np.array(hf.get(cell_line + '_X_enhancers')).transpose((0, 2, 1))
            X_promoters_original = np.array(hf.get(cell_line + '_X_promoters')).transpose((0, 2, 1))
            y = np.array(hf.get(cell_line + 'labels'))
        print 'np.shape(X_enhancers_original): ' + str(np.shape(X_enhancers_original))
        print 'np.shape(X_promoters_original): ' + str(np.shape(X_promoters_original))
        print 'np.shape(y): ' + str(np.shape(y))
        # Build the prediction network and load per-cell-line weights.
        model = bm.build_model(use_JASPAR = False)
        print 'Compiling model...'
        opt = Adam(lr = 1e-5)
        model.compile(loss = 'binary_crossentropy',
                      optimizer = opt,
                      metrics = ["accuracy"])
        print 'Loading ' + cell_line + ' ' + data_type + ' model weights...'
        model.load_weights('/home/sss1/Desktop/projects/DeepInteractions/weights/' + cell_line + '-basic.hdf5')
        out_root = '/home/sss1/Desktop/projects/DeepInteractions/feature_importances/SPEID/from_HOCOMOCO_motifs/'
        out_path = out_root + cell_line + '_' + data_type + '_feature_importance.csv'
        # Baseline scores on unmodified data, used as the reference below.
        print 'Running predictions on original data'
        y_score = model.predict([X_enhancers_original, X_promoters_original], batch_size = 100, verbose = 1)
        true_AUPR = average_precision_score(y, y_score)
        print 'True AUPR is ' + str(true_AUPR)
        true_MS = y_score.mean()
        print 'True MS is ' + str(true_MS)
        with open(out_path, 'wb') as csv_file:
            writer = csv.writer(csv_file, delimiter = ',')
            writer.writerow(['Motif Name', 'Motif Count', 'AUPR Difference', 'MS Difference'])
            # NOTE(review): the loop variable `idxs` leaks into module scope
            # (Python for-loops don't create a scope); replace_motifs_in_data
            # relies on that global -- fragile, verify before refactoring.
            for motif, idxs in matches.iteritems():
                print 'Randomizing ' + str(len(idxs)) + ' occurrences of motif ' + motif + ' in ' + cell_line + ' ' + data_type + '...'
                if data_type == 'enhancers':
                    X_enhancers = replace_motifs_in_data(X_enhancers_original, idxs)
                    X_promoters = X_promoters_original
                elif data_type == 'promoters':
                    X_enhancers = X_enhancers_original
                    X_promoters = replace_motifs_in_data(X_promoters_original, idxs)
                else:
                    raise ValueError
                print 'Running predictions on motif ' + motif + '...'
                y_score = model.predict([X_enhancers, X_promoters], batch_size = 200, verbose = 1)
                AUPR = average_precision_score(y, y_score)
                print 'AUPR after removing motif ' + motif + ' was ' + str(AUPR) + '\n'
                MS = y_score.mean()
                print 'MS after removing motif ' + motif + ' was ' + str(MS) + '\n'
                # Importance = drop in metric caused by removing this motif.
                writer.writerow([motif, str(len(idxs)), str(true_AUPR - AUPR), str(true_MS - MS)])
| gpl-3.0 |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/semi_supervised/label_propagation.py | 9 | 15941 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported..
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
    def _get_kernel(self, X, y=None):
        # Returns the affinity between X and y (or X with itself when y is
        # None).  For 'rbf' this is a dense kernel matrix; for 'knn' it is a
        # sparse connectivity graph (y is None) or raw neighbor indices.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            # Fit the neighbor index lazily on first use and reuse it after.
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)
    @abstractmethod
    def _build_graph(self):
        # Subclasses must return the (possibly sparse) affinity/graph matrix
        # that fit() iterates over when propagating label distributions.
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
    def predict_proba(self, X):
        """Predict probability for each possible outcome.
        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # For knn, _get_kernel returned neighbor indices per query point;
            # sum the label distributions of each point's nearest neighbors.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            # For rbf, weight each training point's label distribution by its
            # kernel affinity to the query points.
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # Normalize each row into a categorical distribution.
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        # -1 is the reserved "unlabeled" marker, not a class.
        classes = (classes[classes != -1])
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        y = np.asarray(y)
        unlabeled = y == -1
        # Labeled points keep weight 1 (hard clamp); unlabeled points are
        # soft-clamped by alpha so propagated mass can flow into them.
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha
        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            # One-hot row for every labeled sample of this class.
            self.label_distributions_[y == label, classes == label] = 1
        # Static term re-injected at every iteration (only labeled rows
        # contribute; scaled by (1 - alpha) when soft clamping is on).
        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            # CSR gives fast row-oriented matrix/vector products below.
            graph_matrix = graph_matrix.tocsr()
        # Fixed-point iteration: propagate, clamp, repeat until the
        # distributions stop moving (tol) or the budget runs out.
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1
        # Normalize rows into proper categorical distributions.
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.randint(0, 2,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)
    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample
        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index on X_.
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Column sums used to normalize the affinities.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): this divides the raw .data array by the diagonal
            # of the normalizer matrix, which does not obviously match the
            # per-row division in the dense branch — confirm against the
            # dense behavior before relying on sparse knn graphs here.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propgation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        clamping factor
    max_iter : float
        maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.randint(0, 2,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)
    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3, n_jobs=1):
        # this one has different base parameters
        # (only the defaults differ from the base class; all arguments are
        # forwarded unchanged)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol,
                                             n_jobs=n_jobs)
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            # Force _get_kernel to (re)fit the nearest-neighbors index on X_.
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # Propagation uses the negated Laplacian with a zeroed diagonal.
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # assumes a COO-format sparse matrix (uses .row/.col) —
            # TODO confirm graph_laplacian returns COO for sparse input.
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| unlicense |
vitaly-krugl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkagg.py | 70 | 4184 | """
Render to gtk from agg
"""
from __future__ import division
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
    """GTK toolbar variant that creates Agg-backed canvases."""
    def _get_canvas(self, fig):
        # Same toolbar behavior as the plain GTK one, but any canvas it
        # constructs renders through Agg.
        return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
    """Figure manager that wires the Agg-backed canvas to a GTK toolbar."""
    def _get_toolbar(self, canvas):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        # Toolbar flavor is chosen by the matplotlib rcParam; 'classic'
        # uses the legacy toolbar, 'toolbar2' the Agg-aware one, anything
        # else disables the toolbar entirely.
        if matplotlib.rcParams['toolbar']=='classic':
            toolbar = NavigationToolbar (canvas, self.window)
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
        else:
            toolbar = None
        return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_gtkagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKAgg(thisFig)
return FigureManagerGTKAgg(canvas, num)
if DEBUG: print 'backend_gtkagg.new_figure_manager done'
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
    """GTK drawing-area canvas that renders through the Agg rasterizer."""
    # Support every file type either parent canvas can save.
    filetypes = FigureCanvasGTK.filetypes.copy()
    filetypes.update(FigureCanvasAgg.filetypes)
    def configure_event(self, widget, event=None):
        # GTK resize handler: resize the figure to match the new window
        # size (in inches) and schedule a redraw.
        if DEBUG: print 'FigureCanvasGTKAgg.configure_event'
        if widget.window is None:
            return
        try:
            # Drop the cached Agg renderer; it is sized for the old window.
            del self.renderer
        except AttributeError:
            pass
        w,h = widget.window.get_size()
        if w==1 or h==1: return # empty fig
        # compute desired figure size in inches
        dpival = self.figure.dpi
        winch = w/dpival
        hinch = h/dpival
        self.figure.set_size_inches(winch, hinch)
        self._need_redraw = True
        self.resize_event()
        if DEBUG: print 'FigureCanvasGTKAgg.configure_event end'
        return True
    def _render_figure(self, pixmap, width, height):
        # Draw via Agg, then copy the RGBA buffer into the GTK pixmap.
        if DEBUG: print 'FigureCanvasGTKAgg.render_figure'
        FigureCanvasAgg.draw(self)
        if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap
        #agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
        buf = self.buffer_rgba(0,0)
        ren = self.get_renderer()
        w = int(ren.width)
        h = int(ren.height)
        # Wrap the Agg buffer as a GdkPixbuf (RGBA, 8 bits/channel,
        # rowstride = 4 bytes per pixel) and blit it onto the pixmap.
        pixbuf = gtk.gdk.pixbuf_new_from_data(
            buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
        pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
                           gtk.gdk.RGB_DITHER_NONE, 0, 0)
        if DEBUG: print 'FigureCanvasGTKAgg.render_figure done'
    def blit(self, bbox=None):
        # Copy the (possibly clipped) Agg buffer to the on-screen window.
        if DEBUG: print 'FigureCanvasGTKAgg.blit'
        if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap
        agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
        x, y, w, h = self.allocation
        self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
                                   0, 0, 0, 0, w, h)
        if DEBUG: print 'FigureCanvasGTKAgg.done'
    def print_png(self, filename, *args, **kwargs):
        # Do this so we can save the resolution of figure in the PNG file
        agg = self.switch_backends(FigureCanvasAgg)
        return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
| agpl-3.0 |
BhallaLab/moose-examples | tutorials/ExcInhNet/ExcInhNet_Ostojic2014_Brunel2000_brian2.py | 2 | 8226 | '''
The LIF network is based on:
Ostojic, S. (2014).
Two types of asynchronous activity in networks of
excitatory and inhibitory spiking neurons.
Nat Neurosci 17, 594-600.
Key parameter to change is synaptic coupling J (mV).
Tested with Brian 1.4.1
Written by Aditya Gilra, CAMP 2014, Bangalore, 20 June, 2014.
Updated to match MOOSE implementation by Aditya Gilra, Jan, 2015.
Currently, simtime and dt are modified to compare across MOOSE, Brian1 and Brian2.
'''
#import modules and functions to be used
# 'from pylab import *' which imports:
# matplot like commands into the namespace, further
# also can use np. for numpy and mpl. for matplotlib
try:
from brian2 import * # importing brian also does:
except ImportError as e:
print( "[INFO ] brian2 is not found." )
quit()
#prefs.codegen.target='numpy'
#prefs.codegen.target='weave'
set_device('cpp_standalone')
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.01*ms
simtime = 10.0*second # Simulation time
defaultclock.dt = simdt # Brian's default sim time step
dt = defaultclock.dt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
el = -65.*mV # Resting potential
vt = -45.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = -55.*mV # Reset potential
inp = 20.1*mV/taum # input I/C to each neuron
# same as setting el=-41 mV and inp=0
taur = 0.5*ms # Refractory period
# The extra dt*second keeps the effective delay one time step longer than
# the nominal 0.5 ms (matches the MOOSE implementation referenced above).
taudelay = 0.5*ms + dt*second # synaptic delay
# LIF membrane equation with constant input current (per the paper).
eqs_neurons='''
dv/dt=(1/taum)*(-(v-el))+inp : volt
'''
# ###########################################
# Network parameters: numbers
# ###########################################
N = 1000 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
C = 100 # Number of incoming connections on each neuron (exc or inh)
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
excC = int(fC*C) # number of exc incoming connections
J = 0.8*mV # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 1000, C = 1000
g = 5.0 # -gJ is the inh strength. For exc-inh balance g>~f(1-f)=4
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
P=NeuronGroup(N,model=eqs_neurons,\
    threshold='v>=vt',reset='v=vr',refractory=taur,method='euler')
# not distributing uniformly to ensure match with MOOSE
#Pe.v = uniform(el,vt+10*mV,NE)
#Pi.v = uniform(el,vt+10*mV,NI)
# Deterministic initial voltages evenly spread below threshold.
P.v = linspace(el/mV-20,vt/mV,N)*mV
# ###########################################
# Connecting the network
# ###########################################
sparseness_e = fC*C/float(NE)
sparseness_i = (1-fC)*C/float(NI)
# Follow Dale's law -- exc (inh) neurons only have +ve (-ve) synapses
# hence need to set w correctly (always set after creating connections
con = Synapses(P,P,'w:volt',pre='v_post+=w',method='euler')
# I don't use Brian's connect_random,
# instead I use the same algorithm and seed as in the MOOSE version
#con_e.connect_random(sparseness=sparseness_e)
#con_i.connect_random(sparseness=sparseness_i)
## Connections from some Exc/Inh neurons to each neuron
random.seed(100) # set seed for reproducibility of simulations
conn_i = []
conn_j = []
# Build explicit (pre, post) index lists: every post-synaptic neuron j
# receives exactly excC excitatory and C-excC inhibitory afferents.
for j in range(0,N):
    ## draw excC number of neuron indices out of NmaxExc neurons
    preIdxsE = random.sample(list(range(NE)),excC)
    ## draw inhC=C-excC number of neuron indices out of inhibitory neurons
    preIdxsI = random.sample(list(range(NE,N)),C-excC)
    ## connect these presynaptically to i-th post-synaptic neuron
    ## choose the synapses object based on whether post-syn nrn is exc or inh
    conn_i += preIdxsE
    conn_j += [j]*excC
    conn_i += preIdxsI
    conn_j += [j]*(C-excC)
con.connect(conn_i,conn_j)
con.delay = taudelay
# Dale's law: presynaptic index < NE => excitatory (+J), else inhibitory (-gJ).
con.w['i<NE'] = J
con.w['i>=NE'] = -g*J
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
sm = SpikeMonitor(P)
# Population monitor
popm = PopulationRateMonitor(P)
# voltage monitor
sm_vm = StateMonitor(P,'v',record=list(range(10))+list(range(NE,NE+10)))
# ###########################################
# Simulate
# ###########################################
print(("Setup complete, running for",simtime,"at dt =",dt,"s."))
t1 = time.time()
run(simtime,report='text')
# cpp_standalone mode: compile and execute the generated project now.
device.build(directory='output', compile=True, run=True, debug=False)
print(('inittime + runtime, t = ', time.time() - t1))
#print "For g,J =",g,J,"mean exc rate =",\
#    sm_e.num_spikes/float(NE)/(simtime/second),'Hz.'
#print "For g,J =",g,J,"mean inh rate =",\
#    sm_i.num_spikes/float(NI)/(simtime/second),'Hz.'
# ###########################################
# Analysis functions
# ###########################################
tau=50e-3
sigma = tau/2.
# normalized Gaussian kernel, integral with dt is normed to 1
# to count as 1 spike smeared over a finite interval
norm_factor = 1./(sqrt(2.*pi)*sigma)
gauss_kernel = array([norm_factor*exp(-x**2/(2.*sigma**2))\
    for x in arange(-5.*sigma,5.*sigma+dt,dt)])
def rate_from_spiketrain(spikemon,fulltime,nrnidx=None):
    """
    Returns a rate series of spiketimes convolved with a Gaussian kernel;
    all times must be in SI units,
    remember to divide fulltime and dt by second

    Parameters
    ----------
    spikemon : SpikeMonitor
        Brian2 spike monitor; ``.t`` holds spike times and ``.i`` the
        indices of the neurons that spiked.
    fulltime : float
        Duration (seconds) of the returned rate series.
    nrnidx : int or None
        If given, use only the spikes of that neuron; otherwise pool all
        recorded neurons.
    """
    if nrnidx is None:
        spiketimes = spikemon.t # take spiketimes of all neurons
    else:
        # take spiketimes of only neuron index nrnidx
        spiketimes = spikemon.t[where(spikemon.i==nrnidx)[0]]
    kernel_len = len(gauss_kernel)
    # need to accommodate half kernel_len on either side of fulltime
    rate_full = zeros(int(fulltime/dt)+kernel_len)
    for spiketime in spiketimes:
        idx = int(spiketime/dt)
        rate_full[idx:idx+kernel_len] += gauss_kernel
    # only the middle fulltime part of the rate series
    # This is already in Hz,
    # since should have multiplied by dt for above convolution
    # and divided by dt to get a rate, so effectively not doing either.
    # Use floor division: under Python 3 `kernel_len/2` is a float and
    # floats are not valid slice indices (TypeError); this script already
    # targets Python 3 (parenthesized print calls).
    half_kernel = kernel_len // 2
    return rate_full[half_kernel:half_kernel+int(fulltime/dt)]
# ###########################################
# Make plots
# ###########################################
fig = figure()
# Vm plots
# Membrane voltage traces of the first three recorded neurons.
timeseries = arange(0,simtime/second+dt,dt)
for i in range(3):
    plot(timeseries[:len(sm_vm.t)],sm_vm[i].v)
fig = figure()
# raster plots
subplot(231)
plot(sm.t,sm.i,',')
title(str(N)+" exc & inh neurons")
xlim([0,simtime/second])
xlabel("")
print("plotting firing rates")
subplot(232)
# firing rates
# Smoothed single-neuron rates for the first few excitatory neurons.
timeseries = arange(0,simtime/second+dt,dt)
num_to_plot = 10
#rates = []
for nrni in range(num_to_plot):
    rate = rate_from_spiketrain(sm,simtime/second,nrni)
    plot(timeseries[:len(rate)],rate)
    #print mean(rate),len(sm_e[nrni])
    #rates.append(rate)
title(str(num_to_plot)+" exc rates")
ylabel("Hz")
ylim(0,300)
subplot(235)
# Same for the first few inhibitory neurons (indices >= NE).
for nrni in range(NE,NE+num_to_plot):
    rate = rate_from_spiketrain(sm,simtime/second,nrni)
    plot(timeseries[:len(rate)],rate)
    #print mean(rate),len(sm_i[nrni])
    #rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("Time (s)")
ylabel("Hz")
print("plotting pop firing rates")
# Population firing rates
subplot(233)
timeseries = arange(0,simtime/second,dt)
#plot(timeseries,popm_e.smooth_rate(width=50.*ms,filter="gaussian"),color='grey')
# Pooled rate over all N neurons, normalized to a per-neuron rate.
rate = rate_from_spiketrain(sm,simtime/second)/float(N)
plot(timeseries[:len(rate)],rate)
title("population rate")
ylabel("Hz")
xlabel("Time (s)")
fig.tight_layout()
show()
| gpl-2.0 |
BlueFern/DBiharMesher | util/PlotColumn2D.py | 1 | 2787 | # -*- coding: utf-8 -*-
"""
Read SMC Ca2+ values from a line of cells in temporal series and produce a 2D plot
"""
import re
import os
import vtk
import numpy
import matplotlib.pyplot as plt
print 'Importing ', __file__
def tryInt(s):
    """Return ``s`` converted to int when possible, otherwise ``s`` unchanged.

    Building block for the natural-sort key in alphaNumKey().
    """
    try:
        return int(s)
    except (ValueError, TypeError):
        # Only swallow conversion failures; the original bare `except:`
        # would also hide unrelated errors such as KeyboardInterrupt.
        return s
def alphaNumKey(s):
    """Split a string into alternating text and integer chunks.

    "z23a" -> ["z", 23, "a"]; used as a natural-sort key.
    """
    # The capturing group makes re.split keep the digit runs; each chunk
    # is then converted to int where possible.
    return list(map(tryInt, re.split('([0-9]+)', s)))
def sortNicely(l):
    """ Sort the given list in the way that humans expect.
    """
    # In-place natural sort, so e.g. "file2" orders before "file10".
    l.sort(key=alphaNumKey)
# Module-level plot configuration; a driver script is expected to override
# these before calling plotColumn2D() (see usage()).
splitPos = 'mid'   # row splice index; 'mid' means half the row length
suffix = ''        # optional extra tag appended to the output figure name
yLine = None       # if set, y position of a horizontal red marker line
xMin = 0           # plot axis limits; -1 for a max means "use data extent"
xMax = -1
yMin = 0
yMax = -1
def plotColumn2D(fileList):
    """Read one VTU file per time step and render cells x time as pcolormesh.

    Each file contributes one row (the first cell-data array); rows are
    spliced at splitPos, reversed, stacked, transposed and plotted. The
    figure is saved as <cwd-name>[-suffix].png and shown.
    """
    # Report our CWD just for testing purposes.
    print "CWD:", os.getcwd()
    global splitPos
    global xMax
    global yMax
    # Natural sort so frame_2 comes before frame_10.
    sortNicely(fileList)
    allRows = []
    for file in fileList:
        print 'Reading', file
        file_name = os.path.abspath(file)
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(file_name)
        reader.Update()
        # First cell-data array of the grid; one value per cell.
        data = reader.GetOutput().GetCellData().GetArray(0)
        row = []
        for i in range(data.GetNumberOfTuples()):
            row.append(data.GetValue(i))
        # Fix the crooked backwards ordering in the output data. Sigh...
        # Also, splice the branches.
        if isinstance(splitPos, str):
            if splitPos == 'mid':
                splitPos = len(row) / 2
            else:
                print 'Can\'t split row with splitPos =', splitPos
                return
        r1 = row[:splitPos]
        r2 = row[len(r1):]
        row = r2 + r1
        # Plot in ascending order.
        row.reverse()
        allRows.append(row)
    # Rows are time steps; transpose so time runs along x and cells along y.
    array2D = numpy.array(allRows)
    array2D = numpy.transpose(array2D)
    plt.pcolormesh(array2D, vmin=0.0, vmax=1.0)
    if yLine != None:
        plt.axhline(yLine, color='r')
    figName = os.path.split(os.getcwd())[1]
    # plt.title(figName)
    plt.xlabel('Time (sec.)')
    plt.ylabel('Cell (ord.)')
    plt.colorbar()
    # plt.axis([0, array2D.shape[1], 0, array2D.shape[0]])
    # -1 means "autoscale to the data extent".
    if xMax == -1:
        xMax = array2D.shape[1]
    if yMax == -1:
        yMax = array2D.shape[0]
    plt.xlim((xMin, xMax))
    plt.ylim((yMin, yMax))
    plt.tight_layout()
    if suffix != '':
        figName = figName + '.' + suffix
    figName = figName.replace('.', '-')
    plt.savefig(figName + '.png', bbox_inches='tight', dpi=400)
    plt.show()
def usage():
    # Reminder printed when the module is executed directly: this file is
    # meant to be imported by a driver that sets the module globals first.
    print "This script is to be run with global parameters (input files, splitPos, yMin, yMax) set in the calling script."
# Running the module directly only prints the usage reminder; no plotting
# is attempted because the required globals are not set.
if __name__ == '__main__':
    print "Starting", os.path.basename(__file__)
    usage()
    print "Exiting", os.path.basename(__file__)
| gpl-2.0 |
mohittahiliani/PIE-ns3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
    """Simulate an OLSR ad-hoc WiFi grid in ns-3 and report per-flow stats.

    Builds a NumNodesSide x NumNodesSide grid of ad-hoc WiFi nodes with
    OLSR routing, runs one UDP on-off flow from each node to a mirrored
    peer, collects FlowMonitor statistics and either prints them, writes
    them to XML (--Results) or plots a delay histogram (--Plot).
    """
    cmd = ns.core.CommandLine()
    cmd.NumNodesSide = None
    cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
    cmd.Results = None
    cmd.AddValue("Results", "Write XML results to file")
    cmd.Plot = None
    cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
    cmd.Parse(argv)
    # WiFi channel/PHY/MAC setup: ad-hoc mode, ARF rate control.
    wifi = ns.wifi.WifiHelper.Default()
    wifiMac = ns.wifi.NqosWifiMacHelper.Default()
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    ssid = ns.wifi.Ssid("wifi-default")
    wifi.SetRemoteStationManager("ns3::ArfWifiManager")
    wifiMac.SetType ("ns3::AdhocWifiMac",
                     "Ssid", ns.wifi.SsidValue(ssid))
    # Routing: static routes at priority 0, OLSR at priority 100.
    internet = ns.internet.InternetStackHelper()
    list_routing = ns.internet.Ipv4ListRoutingHelper()
    olsr_routing = ns.olsr.OlsrHelper()
    static_routing = ns.internet.Ipv4StaticRoutingHelper()
    list_routing.Add(static_routing, 0)
    list_routing.Add(olsr_routing, 100)
    internet.SetRoutingHelper(list_routing)
    ipv4Addresses = ns.internet.Ipv4AddressHelper()
    ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    port = 9   # Discard port(RFC 863)
    onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
                                              ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
    onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
    onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
    onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
    addresses = []
    nodes = []
    if cmd.NumNodesSide is None:
        num_nodes_side = NUM_NODES_SIDE
    else:
        num_nodes_side = int(cmd.NumNodesSide)
    # Place nodes on a DISTANCE-spaced grid; each gets the full stack.
    for xi in range(num_nodes_side):
        for yi in range(num_nodes_side):
            node = ns.network.Node()
            nodes.append(node)
            internet.Install(ns.network.NodeContainer(node))
            mobility = ns.mobility.ConstantPositionMobilityModel()
            mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
            node.AggregateObject(mobility)
            devices = wifi.Install(wifiPhy, wifiMac, node)
            ipv4_interfaces = ipv4Addresses.Assign(devices)
            addresses.append(ipv4_interfaces.GetAddress(0))
    # Each node sends to the node mirrored across the address list, with a
    # randomized start time so flows do not begin in lock-step.
    for i, node in enumerate(nodes):
        destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
        #print i, destaddr
        onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
        app = onOffHelper.Install(ns.network.NodeContainer(node))
        urv = ns.core.UniformRandomVariable()
        app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
    #internet.EnablePcapAll("wifi-olsr")
    flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
    #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
    monitor = flowmon_helper.InstallAll()
    monitor = flowmon_helper.GetMonitor()
    monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
    ns.core.Simulator.Stop(ns.core.Seconds(44.0))
    ns.core.Simulator.Run()
    def print_stats(os, st):
        # Dump one flow's FlowMonitor statistics to the stream `os`
        # (Python 2 `print >>` syntax).
        print >> os, "  Tx Bytes: ", st.txBytes
        print >> os, "  Rx Bytes: ", st.rxBytes
        print >> os, "  Tx Packets: ", st.txPackets
        print >> os, "  Rx Packets: ", st.rxPackets
        print >> os, "  Lost Packets: ", st.lostPackets
        if st.rxPackets > 0:
            print >> os, "  Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
            print >> os, "  Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
            print >> os, "  Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
        if 0:
            # Disabled: verbose per-bin histogram output.
            print >> os, "Delay Histogram"
            for i in range(st.delayHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
                    st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
            print >> os, "Jitter Histogram"
            for i in range(st.jitterHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
                    st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
            print >> os, "PacketSize Histogram"
            for i in range(st.packetSizeHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
                    st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
        for reason, drops in enumerate(st.packetsDropped):
            print "  Packets dropped by reason %i: %i" % (reason, drops)
        #for reason, drops in enumerate(st.bytesDropped):
        #    print "Bytes dropped by reason %i: %i" % (reason, drops)
    monitor.CheckForLostPackets()
    classifier = flowmon_helper.GetClassifier()
    if cmd.Results is None:
        for flow_id, flow_stats in monitor.GetFlowStats():
            t = classifier.FindFlow(flow_id)
            proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
            print "FlowID: %i (%s %s/%s --> %s/%i)" % \
                (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
            print_stats(sys.stdout, flow_stats)
    else:
        print monitor.SerializeToXmlFile(cmd.Results, True, True)
    if cmd.Plot is not None:
        import pylab
        delays = []
        for flow_id, flow_stats in monitor.GetFlowStats():
            tupl = classifier.FindFlow(flow_id)
            # Skip OLSR control traffic (UDP source port 698).
            if tupl.protocol == 17 and tupl.sourcePort == 698:
                continue
            delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
        pylab.hist(delays, 20)
        pylab.xlabel("Delay (s)")
        pylab.ylabel("Number of Flows")
        pylab.show()
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/IPython/core/tests/test_pylabtools.py | 12 | 7550 | """Tests for pylab tools module.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from io import UnsupportedOperation, BytesIO
import matplotlib
matplotlib.use('Agg')
from matplotlib.figure import Figure
from nose import SkipTest
import nose.tools as nt
from matplotlib import pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.display import _PNG, _JPEG
from .. import pylabtools as pt
from IPython.testing import decorators as dec
def test_figure_to_svg():
    """print_figure returns None for an empty figure, SVG markup otherwise."""
    # simple empty-figure test
    fig = plt.figure()
    nt.assert_equal(pt.print_figure(fig, 'svg'), None)
    plt.close('all')
    # simple check for at least svg-looking output
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot([1,2,3])
    plt.draw()
    svg = pt.print_figure(fig, 'svg')[:100].lower()
    nt.assert_in(u'doctype svg', svg)
def _check_pil_jpeg_bytes():
    """Skip if PIL can't write JPEGs to BytesIO objects"""
    # PIL's JPEG plugin can't write to BytesIO objects
    # Pillow fixes this
    from PIL import Image
    buf = BytesIO()
    img = Image.new("RGB", (4,4))
    try:
        img.save(buf, 'jpeg')
    except Exception as e:
        # Convert any save failure into a test skip with the cause attached.
        ename = e.__class__.__name__
        raise SkipTest("PIL can't write JPEG to BytesIO: %s: %s" % (ename, e))
@dec.skip_without("PIL.Image")
def test_figure_to_jpeg():
    """print_figure with 'jpeg' produces JPEG-magic-prefixed bytes."""
    _check_pil_jpeg_bytes()
    # simple check for at least jpeg-looking output
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot([1,2,3])
    plt.draw()
    jpeg = pt.print_figure(fig, 'jpeg', quality=50)[:100].lower()
    assert jpeg.startswith(_JPEG)
def test_retina_figure():
    """retina_figure returns None for empty figures, else (png, metadata)."""
    # simple empty-figure test
    fig = plt.figure()
    nt.assert_equal(pt.retina_figure(fig), None)
    plt.close('all')
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot([1,2,3])
    plt.draw()
    png, md = pt.retina_figure(fig)
    assert png.startswith(_PNG)
    # Metadata must carry the display size for 2x (retina) scaling.
    nt.assert_in('width', md)
    nt.assert_in('height', md)
# Figure-format name -> MIME type expected to be active after
# select_figure_formats(); 'retina' is just high-DPI PNG.
_fmt_mime_map = {
    'png': 'image/png',
    'jpeg': 'image/jpeg',
    'pdf': 'application/pdf',
    'retina': 'image/png',
    'svg': 'image/svg+xml',
}
def test_select_figure_formats_str():
    """A single format name activates exactly its MIME formatter for Figure."""
    ip = get_ipython()
    for fmt, active_mime in _fmt_mime_map.items():
        pt.select_figure_formats(ip, fmt)
        for mime, f in ip.display_formatter.formatters.items():
            if mime == active_mime:
                nt.assert_in(Figure, f)
            else:
                nt.assert_not_in(Figure, f)
def test_select_figure_formats_kwargs():
    """Extra kwargs are captured by the PNG formatter closure and usable."""
    ip = get_ipython()
    kwargs = dict(quality=10, bbox_inches='tight')
    pt.select_figure_formats(ip, 'png', **kwargs)
    formatter = ip.display_formatter.formatters['image/png']
    f = formatter.lookup_by_type(Figure)
    # The print-figure callable closes over the kwargs dict; inspect it.
    cell = f.__closure__[0].cell_contents
    nt.assert_equal(cell, kwargs)
    # check that the formatter doesn't raise
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.plot([1,2,3])
    plt.draw()
    formatter.enabled = True
    png = formatter(fig)
    assert png.startswith(_PNG)
def test_select_figure_formats_set():
    """Sets/lists/tuples of formats activate exactly the matching MIMEs."""
    ip = get_ipython()
    for fmts in [
        {'png', 'svg'},
        ['png'],
        ('jpeg', 'pdf', 'retina'),
        {'svg'},
    ]:
        active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
        pt.select_figure_formats(ip, fmts)
        for mime, f in ip.display_formatter.formatters.items():
            if mime in active_mimes:
                nt.assert_in(Figure, f)
            else:
                nt.assert_not_in(Figure, f)
def test_select_figure_formats_bad():
    """Unknown format names raise ValueError, alone or mixed with valid ones."""
    ip = get_ipython()
    with nt.assert_raises(ValueError):
        pt.select_figure_formats(ip, 'foo')
    with nt.assert_raises(ValueError):
        pt.select_figure_formats(ip, {'png', 'foo'})
    with nt.assert_raises(ValueError):
        pt.select_figure_formats(ip, ['retina', 'pdf', 'bar', 'bad'])
def test_import_pylab():
    """import_pylab populates the namespace with plt and np aliases."""
    ns = {}
    pt.import_pylab(ns, import_all=False)
    nt.assert_true('plt' in ns)
    nt.assert_equal(ns['np'], np)
class TestPylabSwitch(object):
    """Exercise InteractiveShell.enable_matplotlib's GUI-selection logic.

    setup() monkeypatches matplotlib's rcParams and the pylabtools helpers
    so no real backend is ever activated; teardown() restores everything.
    The asserts below pin the rule that the first real GUI choice is
    remembered in ``pylab_gui_select`` and is not clobbered by 'inline'.
    """
    class Shell(InteractiveShell):
        # Stub: a real shell would start a GUI event loop here.
        def enable_gui(self, gui):
            pass
    def setup(self):
        import matplotlib
        def act_mpl(backend):
            # Stand-in for pt.activate_matplotlib: only records the backend.
            matplotlib.rcParams['backend'] = backend
        # Save rcParams since they get modified
        self._saved_rcParams = matplotlib.rcParams
        self._saved_rcParamsOrig = matplotlib.rcParamsOrig
        matplotlib.rcParams = dict(backend='Qt4Agg')
        matplotlib.rcParamsOrig = dict(backend='Qt4Agg')
        # Mock out functions
        self._save_am = pt.activate_matplotlib
        pt.activate_matplotlib = act_mpl
        self._save_ip = pt.import_pylab
        pt.import_pylab = lambda *a,**kw:None
        self._save_cis = pt.configure_inline_support
        pt.configure_inline_support = lambda *a,**kw:None
    def teardown(self):
        # Restore the real helpers and matplotlib state saved in setup().
        pt.activate_matplotlib = self._save_am
        pt.import_pylab = self._save_ip
        pt.configure_inline_support = self._save_cis
        import matplotlib
        matplotlib.rcParams = self._saved_rcParams
        matplotlib.rcParamsOrig = self._saved_rcParamsOrig
    def test_qt(self):
        # With no argument the (mocked) default resolves to 'qt'.
        s = self.Shell()
        gui, backend = s.enable_matplotlib(None)
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
        # Switching to inline must not clobber the remembered GUI choice.
        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, 'qt')
        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, 'qt')
        # Calling with no argument falls back to the remembered selection.
        gui, backend = s.enable_matplotlib()
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
    def test_inline(self):
        # 'inline' alone never sets a GUI selection ...
        s = self.Shell()
        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, None)
        gui, backend = s.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        nt.assert_equal(s.pylab_gui_select, None)
        # ... but a later real GUI request does.
        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
    def test_inline_twice(self):
        "Using '%matplotlib inline' twice should not reset formatters"
        ip = self.Shell()
        gui, backend = ip.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        fmts =  {'png'}
        active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
        pt.select_figure_formats(ip, fmts)
        # A second 'inline' call must leave the selected formatters intact.
        gui, backend = ip.enable_matplotlib('inline')
        nt.assert_equal(gui, 'inline')
        for mime, f in ip.display_formatter.formatters.items():
            if mime in active_mimes:
                nt.assert_in(Figure, f)
            else:
                nt.assert_not_in(Figure, f)
    def test_qt_gtk(self):
        # Once 'qt' is selected, a conflicting 'gtk' request is ignored.
        s = self.Shell()
        gui, backend = s.enable_matplotlib('qt')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
        gui, backend = s.enable_matplotlib('gtk')
        nt.assert_equal(gui, 'qt')
        nt.assert_equal(s.pylab_gui_select, 'qt')
| bsd-3-clause |
anhaidgroup/py_entitymatching | py_entitymatching/matcher/logregmatcher.py | 1 | 1284 | """
This module contains the functions for Logistic Regression classifier.
"""
from py_entitymatching.matcher.mlmatcher import MLMatcher
from sklearn.linear_model import LogisticRegression
from py_entitymatching.matcher.matcherutils import get_ts
class LogRegMatcher(MLMatcher):
    """
    Logistic Regression matcher.

    Thin wrapper that plugs scikit-learn's LogisticRegression into the
    py_entitymatching MLMatcher interface.

    Args:
        *args,**kwargs: The arguments to scikit-learn's Logistic Regression
            classifier.
        name (string): The name of this matcher (defaults to None). If the
            matcher name is None, the class automatically generates a string
            and assigns it as the name.
    """
    def __init__(self, *args, **kwargs):
        # If the name is given, then pop it
        name = kwargs.pop('name', None)
        if name is None:
            # No name given: generate one from a constant prefix plus a
            # get_ts() suffix so each instance gets a distinct name.
            self.name = 'LogisticRegression'+ '_' + get_ts()
        else:
            # Set the name of the matcher, with the given name.
            self.name = name
        super(LogRegMatcher, self).__init__()
        # Set the classifier to the scikit-learn classifier.
        self.clf = LogisticRegression(*args, **kwargs)
self.clf.classes_ = [0, 1] | bsd-3-clause |
zihua/scikit-learn | examples/manifold/plot_compare_methods.py | 31 | 4051 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# One 2x2 panel per k-means assumption being demonstrated.
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170  # fixed seed so the blobs (and the failures) reproduce
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data: a linear transform stretches the blobs,
# breaking k-means' implicit isotropic (spherical-cluster) assumption.
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance per blob
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs: keep 500/100/10 points of the three original blobs.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
esquishesque/musical-potato | analysis_bulk.py | 1 | 14383 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
#full data frame with all data
df=pd.read_excel('first survey - English final (Responses).xlsx',sheetname=5,skiprows=1,keep_default_na=False)
#df=pd.read_excel('survey_test.xls',sheetname=1)
#get the column names
c=df.columns
#select the subset of the questions that are about wages etc
cdep=70
cdiv=71
ccampus=72
cexp=77
cage=78
wagesStart = 2
hiringStart = 7
workloadStart = 14
leavesStart = 21
workcondStart = 24
trainingStart = 29
equityStart = 34
interactionStart = 41
healthStart = 45
financeStart = 49
fundingStart = 55
wagesLen = 5
hiringLen = 7
workloadLen = 7
leavesLen = 3
workcondLen = 5
trainingLen = 5
equityLen = 7
interactionLen = 4
healthLen = 4
financeLen = 6
fundingLen = 3
cwages=c[wagesStart:wagesStart+wagesLen]
chiring=c[hiringStart:hiringStart+hiringLen]
cworkload=c[workloadStart:workloadStart+workloadLen]
cleaves=c[leavesStart:leavesStart+leavesLen]
cworkcond=c[workcondStart:workcondStart+workcondLen]
ctraining=c[trainingStart:trainingStart+trainingLen]
cequity=c[equityStart:equityStart+equityLen]
cinteraction=c[interactionStart:interactionStart+interactionLen]
chealth=c[healthStart:healthStart+healthLen]
cfinance=c[financeStart:financeStart+financeLen]
cfunding=c[fundingStart:fundingStart+fundingLen]
#set up other variables
issues_list=[cwages,chiring,cworkload,cleaves,cworkcond,ctraining,cequity,cinteraction,chealth,cfinance,cfunding]
titles_list = [issue[0][:issue[0].find(" [")] for issue in issues_list]
#this is a hack, but the slash in one entry is a problem
titles_list[-2].replace('/','-')
numResponses = len(df.index)
#clean up df and set up binaries for affects and imporant
binariesStart = len(c)
binaries_issues_list=[]
issuesColNames = c[wagesStart:fundingLen+fundingStart]
for issue in issues_list:
start = len(df.columns)
for question in issue: #using contains because there are a few variations on the exact wording
df[question][(df[question].str.contains("UofT")==True)]="important"
df[question][(df[question].str.contains("both")==True)]="both"
df[question][(df[question].str.contains("personally")==True)]="affects"
df[question][(df[question].str.contains("N/A")==True)]="neither"
#set up columns for affects and important binaries
temp1 = question + " - affects"
temp2 = question + " - important"
df[temp1] = ((df[question] == "affects") | (df[question] == "both"))
df[temp2] = ((df[question] == "important") | (df[question] == "both"))
df[temp1].replace(True,"affects",inplace=True)
df[temp2].replace(True,"important",inplace=True)
#creates a list of tuples with the start loc and num rows for each issue
binaries_issues_list.append((start,len(df.columns)-start))
##################################################
script_option=int(sys.argv[1])
#for debugging
if script_option==0:
pass
if script_option==1:
#loop over the issues_list
for ii in range(0,len(issues_list)):
#clear the figure if one previous
plt.clf()
#define a figure with a size because we want to save it now
f=plt.figure(figsize=(10,12),dpi=100)
f.suptitle(titles_list[ii])
#loop over every item in each sub category
for jj in range(0,len(issues_list[ii])):
#set the subplot axes
plt.subplot(len(issues_list[ii]),1,jj+1)
#plot the histogram, sort_index sets the items alphabetically
df[issues_list[ii][jj]].value_counts().sort_index().plot.barh()
#set the xaxis based on the total number of responses
plt.xlim([0,numResponses])
#add a title so we remember what it is
plt.title(issues_list[ii][jj][(issues_list[ii][jj].find("[")+1):len(issues_list[ii][jj])-1])
#add some blank space
f.subplots_adjust(hspace=1)
#outputs the figures
f.savefig(titles_list[ii].replace("/",".")+'_hist.png')
if script_option==3:
#analysis just of binary affects/important questions
for ii in range(0,len(binaries_issues_list)):
#clear the figure if one previous
plt.clf()
#define a figure with a size because we want to save it now
f=plt.figure(figsize=(10,12),dpi=100)
f.suptitle(titles_list[ii])
#loop over every item in each sub category
for jj in range(binaries_issues_list[ii][1]):
currentCol = binaries_issues_list[ii][0]+jj
#set the subplot axes
plt.subplot((math.ceil(binaries_issues_list[ii][1]/2)),2,jj+1)
#plot the histogram, sort_index sets the items alphabetically
df.iloc[:,currentCol][df.iloc[:,currentCol]!=False].value_counts().sort_index().plot.barh()
#set the xaxis based on the total number of responses
#plt.xlim([0,df[issues_list[ii][jj]].value_counts().sum()])
plt.xlim([0,numResponses])
#hackily title only every other one
if(jj % 2 == 0):
plt.title(df.columns[currentCol][(df.columns[currentCol].find("[")+1):(df.columns[currentCol].find("]"))], x=1)
#add some blank space
f.subplots_adjust(hspace=1)
#outputs the figures
f.savefig(titles_list[ii].replace("/",".")+'_binaries_hist.png')
if script_option==2:
#Gender analysis section
#I'm using a three category classification now, but I could do other things
#this part is going to be a bit hacky because I don't want to assign new axes labels
#add a separate column to keep track of the new category
df['G']='N'
df['G'][df['Gender identity']=='Man']='M'
df['G'][df['Gender identity']=='Cis, Man']='M'
df['G'][df['Gender identity']=='Woman']='F'
df['G'][df['Gender identity']=='Cis, Woman']='F'
for ii in range(0,len(issues_list)):
#clear the figure if one previous
plt.clf()
#define a figure with a size because we want to save it now
f=plt.figure(figsize=(15,30),dpi=100)
#loop over every item in each sub category
for jj in range(0,len(issues_list[ii])):
#set the subplot axes - subplot(nrows, ncols, plot_number)
plt.subplot(len(issues_list[ii]),1,jj+1)
#plot the histograme, sort_index sets the items alphabetically
df2=df.groupby(df['G'])
df2[issues_list[ii][jj]].value_counts().sort_index().plot.barh()
#set the xaxis based on the total number of responses
plt.xlim([0,df2[issues_list[ii][jj]].value_counts().sum()])
#add a title so we remember what it is
plt.title(issues_list[ii][jj])
#add some blank space
f.subplots_adjust(hspace=.5)
#outputs the figures
f.savefig('Gender_'+titles_list[ii].replace("/",".")+'_hist.png')
def supercount_equity(df, Icol, col):
    """Rates at which equity-group members vs everyone else flag issue *col*.

    Args:
        df: survey DataFrame.
        Icol: equity column; membership means the answer equals 'yes'.
        col: issue column with answers in
            {'important', 'affects', 'both', 'neither'}.

    Returns:
        [Ei/Es, Ea/Es, Ni/Ns, Na/Ns] where E*/N* are the equity/non-equity
        groups, *i counts 'important' or 'both', and *a counts 'affects'
        or 'both'.

    Fixes vs the original: empty groups return 0.0 instead of raising
    ZeroDivisionError, and group sizes are plain row counts (len) rather
    than the fragile positional ``.count()[0]`` lookup.
    """
    in_group = df[Icol] == 'yes'
    marked_important = (df[col] == 'important') | (df[col] == 'both')
    marked_affects = (df[col] == 'affects') | (df[col] == 'both')
    n_group = float(len(df.loc[in_group]))
    n_other = float(len(df.loc[~in_group]))

    def _frac(count, total):
        # Guard empty groups: the original crashed with ZeroDivisionError.
        return count / total if total else 0.0

    return [
        _frac(len(df.loc[in_group & marked_important]), n_group),
        _frac(len(df.loc[in_group & marked_affects]), n_group),
        _frac(len(df.loc[~in_group & marked_important]), n_other),
        _frac(len(df.loc[~in_group & marked_affects]), n_other),
    ]
def make_comp_plot_equity(df,Icol,cols,outname):
    """Scatter 'important' (x, %) vs 'affects' (y, %) for each question in
    *cols*, plotted twice per question: equity-group respondents (blue, 'E')
    and everyone else (red, 'N').  Points are labelled by question index and
    the figure is saved to *outname*.
    """
    # One row per question: [Ei, Ea, Ni, Na] fractions from supercount_equity.
    counts=np.zeros([len(cols),4])
    for ii in range(0,len(cols)):
        counts[ii]=supercount_equity(df,Icol,cols[ii])
    counts*=100
    fig,ax=plt.subplots()
    fig.set_size_inches(24.5, 10.5)
    ax.plot(counts[:,2],counts[:,3],'ro')
    for i, txt in enumerate(cols):
        # cols[i][find("[")+1:-1] extracts the sub-question text inside [...].
        ax.annotate(i, (counts[i,0],counts[i,1]))
        ax.plot(counts[i,0],counts[i,1],'bo',label=str(i)+'--- E : '+cols[i][cols[i].find("[")+1:-1])
        ax.annotate(i, (counts[i,2],counts[i,3]))
        ax.plot(counts[i,2],counts [i,3],'ro',label=str(i)+'--- N : '+cols[i][cols[i].find("[")+1:-1])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.ylabel('Affects (%)')
    plt.xlabel('Important (%)')
#    plt.tight_layout()
    fig.savefig(outname)
if script_option==4:
    # Option 4: equity cross-tabs -- one comparison plot per
    # (equity group, issue block) pair, contrasting respondents who
    # identified with the group against those who did not.
    eq_cols=['eq_race','eq_trans','eq_gender','eq_disability','eq_sexuality']
    for eq in eq_cols:
        for issues in issues_list:
            make_comp_plot_equity(df,eq,issues,'comp_plot_'+eq+'_'+issues[0][:issues[0].find("[")-1].replace('/','-')+'.png')
def supercount_work(df, Icol, col):
    """Per work-type rates at which respondents flag issue *col*.

    Args:
        df: survey DataFrame.
        Icol: multi-select work-type column; a respondent counts toward a
            work type when its tag appears anywhere in the answer string.
        col: issue column with answers in
            {'important', 'affects', 'both', 'neither'}.

    Returns:
        [TAi, TAa, CIi, CIa, CPOi, CPOa, AIi, AIa] where *i is the group
        fraction answering 'important' or 'both' and *a the fraction
        answering 'affects' or 'both'.

    Fixes vs the original: all four groups (not just 'AI') are guarded
    against empty-group division by zero, the copy/pasted per-group code is
    collapsed into one helper, and group sizes use len() instead of the
    fragile positional ``.count()[0]`` lookup.
    """
    def _rates(tag):
        # '== True' keeps NaN answers (no work type given) out of the group.
        in_group = df[Icol].str.contains(tag) == True
        total = float(len(df.loc[in_group]))
        if total == 0:
            return [0, 0]
        important = len(df.loc[in_group & ((df[col] == 'important') | (df[col] == 'both'))])
        affects = len(df.loc[in_group & ((df[col] == 'affects') | (df[col] == 'both'))])
        return [important / total, affects / total]

    out = []
    for tag in ('TA', 'CI', 'CPO', 'AI'):
        out.extend(_rates(tag))
    return out
def make_comp_plot_work(df,cols,outname):
    """Scatter 'important' (x, %) vs 'affects' (y, %) for each question in
    *cols*, one point series per work type (TA blue, CI red, CPO green,
    AI magenta).  Points are labelled by question index and the figure is
    saved to *outname*.
    """
    # One row per question: [TAi,TAa,CIi,CIa,CPOi,CPOa,AIi,AIa] fractions.
    counts=np.zeros([len(cols),8])
    for ii in range(0,len(cols)):
        counts[ii]=supercount_work(df,'What kind(s) of Unit 1 work have you done? ',cols[ii])
    counts*=100
    fig,ax=plt.subplots()
    fig.set_size_inches(24.5, 10.5)
    ax.plot(counts[:,2],counts[:,3],'ro')
    for i, txt in enumerate(cols):
        # cols[i][find("[")+1:-1] extracts the sub-question text inside [...].
        ax.annotate(i, (counts[i,0],counts[i,1]))
        ax.plot(counts[i,0],counts[i,1],'bo',label=str(i)+'--- TA : '+cols[i][cols[i].find("[")+1:-1])
        ax.annotate(i, (counts[i,2],counts[i,3]))
        ax.plot(counts[i,2],counts [i,3],'ro',label=str(i)+'--- CI : '+cols[i][cols[i].find("[")+1:-1])
        ax.annotate(i, (counts[i,4],counts[i,5]))
        ax.plot(counts[i,4],counts[i,5],'go',label=str(i)+'--- CPO : '+cols[i][cols[i].find("[")+1:-1])
        ax.annotate(i, (counts[i,6],counts[i,7]))
        ax.plot(counts[i,6],counts [i,7],'mo',label=str(i)+'--- AI : '+cols[i][cols[i].find("[")+1:-1])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.ylabel('Affects (%)')
    plt.xlabel('Important (%)')
#    plt.tight_layout()
    fig.savefig(outname)
if script_option==5:
    # Option 5: one comparison plot per issue block, split by
    # work type (TA / CI / CPO / AI).
    for issues in issues_list:
        make_comp_plot_work(df,issues,'comp_plot_work_type_'+issues[0][:issues[0].find("[")-1].replace('/','-')+'.png')
def supercount_div(df, Icol, col):
    """Per academic-division rates at which respondents flag issue *col*.

    Args:
        df: survey DataFrame.
        Icol: multi-select division column; membership means the division's
            keyword ('Humanities', 'Social', 'Physical', 'Life', 'OISE')
            appears anywhere in the answer string.
        col: issue column with answers in
            {'important', 'affects', 'both', 'neither'}.

    Returns:
        [D1i, D1a, D2i, D2a, D3i, D3a, D4i, D4a, D5i, D5a] where *i is the
        division fraction answering 'important' or 'both' and *a the
        fraction answering 'affects' or 'both'.

    Bug fixes vs the original: it returned raw group sizes and duplicated
    values ([D1s, D1a, ..., D4a, D4a, D5a, D5s]) instead of the rate pairs
    (D4i was never returned at all and D5 was swapped), and it crashed with
    ZeroDivisionError on empty divisions.
    """
    def _rates(tag):
        # '== True' keeps NaN answers (no division given) out of the group.
        in_group = df[Icol].str.contains(tag) == True
        total = float(len(df.loc[in_group]))
        if total == 0:
            return [0, 0]
        important = len(df.loc[in_group & ((df[col] == 'important') | (df[col] == 'both'))])
        affects = len(df.loc[in_group & ((df[col] == 'affects') | (df[col] == 'both'))])
        return [important / total, affects / total]

    out = []
    for tag in ('Humanities', 'Social', 'Physical', 'Life', 'OISE'):
        out.extend(_rates(tag))
    return out
def make_comp_plot_div(df, cols, outname):
    """Scatter 'important' (x, %) vs 'affects' (y, %) for each question in
    *cols*, one colored point series per academic division (D1..D5).
    Points are labelled by question index; the figure is saved to *outname*.

    Bug fixes vs the original: it mistakenly called supercount_work (so the
    grouping column never matched), sized the counts array for 8 values
    instead of the 10 that the D1..D5 pairs need, then indexed columns 8-9
    of that 8-wide array (IndexError), and drew D4 and D5 in the same
    color ('mo').
    """
    # One row per question: [D1i, D1a, ..., D5i, D5a] fractions.
    counts = np.zeros([len(cols), 10])
    for ii in range(0, len(cols)):
        counts[ii] = supercount_div(df, 'What department(s) are you a student in?', cols[ii])
    counts *= 100  # fractions -> percentages
    fig, ax = plt.subplots()
    fig.set_size_inches(24.5, 10.5)
    div_styles = [('D1', 'bo'), ('D2', 'ro'), ('D3', 'go'), ('D4', 'mo'), ('D5', 'co')]
    for i in range(len(cols)):
        # cols[i][find("[")+1:-1] extracts the sub-question text inside [...].
        short = cols[i][cols[i].find("[")+1:-1]
        for jj, (div_name, style) in enumerate(div_styles):
            x, y = counts[i, 2*jj], counts[i, 2*jj+1]
            ax.annotate(i, (x, y))
            ax.plot(x, y, style, label=str(i)+'--- '+div_name+' : '+short)
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.ylabel('Affects (%)')
    plt.xlabel('Important (%)')
    fig.savefig(outname)
if script_option==6:
    # NOTE(review): the output name says 'student_div' but this calls
    # make_comp_plot_work, not make_comp_plot_div -- looks like a
    # copy/paste slip from option 5; confirm intended grouping.
    for issues in issues_list:
        make_comp_plot_work(df,issues,'comp_plot_student_div_'+issues[0][:issues[0].find("[")-1].replace('/','-')+'.png')
| gpl-3.0 |
dcolombo/FilFinder | examples/paper_figures/match_resolution.py | 3 | 7990 | # Licensed under an MIT open source license - see LICENSE
'''
Check resolution effects of masking process.
Degrade an image to match the resolution of a more distant one,
then compare the outputs.
'''
from fil_finder import fil_finder_2D
import numpy as np
from astropy.io.fits import getdata
from astropy import convolution
import matplotlib.pyplot as p
# We want to compare one of the closest regions (Pipe)
# to one of the most distant (Orion-A S).
pipe_img, pipe_hdr = getdata("pipeCenterB59-250.fits", header=True)
pipe_distance = 140. # pc
# orion_img, orion_hdr = getdata("orionA-S-250.fits", header=True)
orion_distance = 400. # pc
r = orion_distance / pipe_distance
conv = np.sqrt(r**2. - 1)
## What to do?
compute = False
output = True
downsample = False
if compute:
kernel = convolution.Gaussian2DKernel(conv)
pipe_degraded = convolution.convolve(pipe_img, kernel, boundary='fill',
fill_value=np.NaN)
p.subplot(121)
p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest")
p.subplot(122)
p.imshow(np.arctan(pipe_degraded/np.percentile(pipe_degraded[np.isfinite(pipe_degraded)], 95)),
origin="lower", interpolation="nearest")
p.show()
filfind = fil_finder_2D(pipe_degraded, pipe_hdr, 18.2, 30, 15, 30, distance=400, glob_thresh=20)
filfind.run(verbose=False, save_name="degraded_pipe", save_plots=False)
## Analysis
if output:
from astropy.table import Table
deg_pipe_analysis = Table.read("degraded_pipe_table.fits")
pipe_analysis = Table.read("pipeCenterB59-250/pipeCenterB59-250_table.fits")
# Plot lengths, widths, orientation, curvature. Adjust for distance difference
# p.subplot2grid((4,2), (0,0))
p.subplot(411)
num1 = int(np.sqrt(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])].size))
num2 = int(np.sqrt(pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])].size))
p.hist(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])] / conv,
bins=num1, label="Degraded", alpha=0.5, color='g')
p.hist(pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])],
bins=num2, label="Normal", alpha=0.5, color='b')
p.xlabel("Width (pc)")
p.legend()
# p.subplot2grid((4,2), (0,1))
p.subplot(412)
p.hist(deg_pipe_analysis["Lengths"] / conv, bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Lengths"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Length (pc)")
# p.legend()
# p.subplot2grid((4,2), (1,0))
p.subplot(413)
p.hist(deg_pipe_analysis["Orientation"], bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Orientation"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Orientation")
# p.legend()
# p.subplot2grid((4,2), (1,1))
p.subplot(414)
p.hist(deg_pipe_analysis["Curvature"], bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Curvature"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Curvature")
# p.legend()
# p.savefig("pipe_comparison_hists.pdf")
# p.savefig("pipe_comparison_hists.eps")
p.show()
## Compare distributions using KS Test
from scipy.stats import ks_2samp
fwhm_ks = ks_2samp(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])] / conv,
pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])])
l_ks = ks_2samp(deg_pipe_analysis["Lengths"] / conv,
pipe_analysis["Lengths"])
o_ks = ks_2samp(np.sin(deg_pipe_analysis["Orientation"]),
np.sin(pipe_analysis["Orientation"]))
c_ks = ks_2samp(deg_pipe_analysis["Curvature"],
pipe_analysis["Curvature"])
ks_tab = Table([fwhm_ks, l_ks, o_ks, c_ks],
names=["FWHM", "Length", "Orientation", "Curvature"])
# ks_tab.write("pipe_comparison_ks_results.csv")
# ks_tab.write("pipe_comparison_ks_results.tex")
## Compare skeletons
deg_pipe_skel = getdata("degraded_pipe_skeletons.fits", 0)
deg_pipe_skel[np.where(deg_pipe_skel>1)] = 1
deg_pipe_skel = deg_pipe_skel[510:1200, 1440:1920]
filfind = fil_finder_2D(pipe_img, pipe_hdr, 18.2, 30, 15, 30, distance=400, glob_thresh=20)
filfind.create_mask(border_masking=True)
filfind.medskel(verbose=False)
filfind.analyze_skeletons()
pipe_skel = filfind.skeleton[30:-30, 30:-30] #getdata("pipeCenterB59-250/pipeCenterB59-250_skeletons.fits", 0)
pipe_skel[np.where(pipe_skel>1)] = 1
pipe_skel = pipe_skel[510:1200, 1440:1920]
# p.subplot2grid((4,2), (2,0), colspan=2, rowspan=2)
pipe_img = pipe_img[510:1200, 1440:1920]
ax = p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest", cmap="binary")
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
cont1 = p.contour(pipe_skel, colors="b", linewidths=3, label="Normal")
cont1.collections[0].set_label("Normal")
cont2 = p.contour(deg_pipe_skel, colors="g", alpha=0.5, label="Degraded")
cont2.collections[0].set_label("Degraded")
p.legend(loc="upper right")
p.show()
if downsample:
def downsample_axis(myarr, factor, axis, estimator=np.nanmean, truncate=False):
"""
Downsample an ND array by averaging over *factor* pixels along an axis.
Crops right side if the shape is not a multiple of factor.
This code is pure np and should be fast.
Parameters
----------
myarr : `~numpy.ndarray`
The array to downsample
factor : int
The factor to downsample by
axis : int
The axis to downsample along
estimator : function
defaults to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
truncate : bool
Whether to truncate the last chunk or average over a smaller number.
e.g., if you downsample [1,2,3,4] by a factor of 3, you could get either
[2] or [2,4] if truncate is True or False, respectively.
"""
# size of the dimension of interest
xs = myarr.shape[axis]
if xs % int(factor) != 0:
if truncate:
view = [slice(None) for ii in range(myarr.ndim)]
view[axis] = slice(None,xs-(xs % int(factor)))
crarr = myarr[view]
else:
newshape = list(myarr.shape)
newshape[axis] = (factor - xs % int(factor))
extension = np.empty(newshape) * np.nan
crarr = np.concatenate((myarr,extension), axis=axis)
else:
crarr = myarr
def makeslice(startpoint,axis=axis,step=factor):
# make empty slices
view = [slice(None) for ii in range(myarr.ndim)]
# then fill the appropriate slice
view[axis] = slice(startpoint,None,step)
return view
# The extra braces here are crucial: We're adding an extra dimension so we
# can average across it!
stacked_array = np.concatenate([[crarr[makeslice(ii)]] for ii in range(factor)])
dsarr = estimator(stacked_array, axis=0)
return dsarr
downsample = downsample_axis(pipe_img, 3, axis=0)
downsample = downsample_axis(downsample, 3, axis=1)
print downsample.shape
p.subplot(1,2,1)
p.title("Pipe Normal")
p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest")
p.subplot(1,2,2)
p.title("Downsample")
p.imshow(np.arctan(downsample/np.percentile(downsample[np.isfinite(downsample)], 95)),
origin="lower", interpolation="nearest")
p.show() | mit |
ua-snap/downscale | old/bin/old/cld_cru_ts31_downscaling.py | 2 | 11324 | # # #
# Current implementation of the cru ts31 (ts32) delta downscaling procedure
#
# Author: Michael Lindgren (malindgren@alaska.edu)
# # #
import numpy as np
def write_gtiff( output_arr, template_meta, output_filename, compress=True ):
    '''
    DESCRIPTION:
    ------------
    output a GeoTiff given a numpy ndarray, rasterio-style
    metadata dictionary, and and output_filename.

    If a multiband file is to be processed, the Longitude
    dimension is expected to be the right-most.
    --> dimensions should be (band, latitude, longitude)

    ARGUMENTS:
    ----------
    output_arr = [numpy.ndarray] with longitude as the right-most dimension
    template_meta = [dict] rasterio-style raster meta dictionary. Typically
        found in a template raster by: rasterio.open( fn ).meta
        (a copy is taken, so the caller's dict is never modified)
    output_filename = [str] path to and name of the output GeoTiff to be
        created. currently only 'GTiff' is supported.
    compress = [bool] if True (default) LZW-compression is applied to the
        output GeoTiff. If False, no compression is applied.
        * this can also be added (along with many other gdal creation options)
        to the template meta as a key value pair template_meta.update( compress='lzw' ).
        See Rasterio documentation for more details.

    RETURNS:
    --------
    string path to the new output_filename created
    '''
    import os
    import warnings
    # Work on a copy so the caller's metadata dict is not mutated
    # (the original popped/updated it in place).
    template_meta = dict( template_meta )
    if 'transform' in template_meta:
        # drop legacy 'transform' key -- presumably superseded by 'affine'
        # in this rasterio version; TODO confirm
        template_meta.pop( 'transform' )
    if not output_filename.endswith( '.tif' ):
        # Original merely *constructed* a UserWarning without emitting it;
        # actually warn the user before silently fixing the extension.
        warnings.warn( 'output_filename does not end with ".tif", it has been fixed for you.' )
        output_filename = os.path.splitext( output_filename )[0] + '.tif'
    if output_arr.ndim == 2:
        # add in a new dimension - can get you into trouble with very large rasters...
        output_arr = output_arr[ np.newaxis, ... ]
    elif output_arr.ndim < 2:
        raise ValueError( 'output_arr must have at least 2 dimensions' )
    nbands, nrows, ncols = output_arr.shape
    if template_meta[ 'count' ] != nbands:
        raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
    if compress and 'compress' not in template_meta:
        template_meta.update( compress='lzw' )
    with rasterio.open( output_filename, 'w', **template_meta ) as out:
        for band in range( 1, nbands+1 ):
            out.write( output_arr[ band-1, ... ], band )
    return output_filename
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
    """
    Shift global lat/lon grid east or west.
    .. tabularcolumns:: |l|L|
    ==============   ====================================================
    Arguments        Description
    ==============   ====================================================
    lon0             starting longitude for shifted grid
                     (ending longitude if start=False). lon0 must be on
                     input grid (within the range of lonsin).
    datain           original data with longitude the right-most
                     dimension.
    lonsin           original longitudes.
    ==============   ====================================================
    .. tabularcolumns:: |l|L|
    ==============   ====================================================
    Keywords         Description
    ==============   ====================================================
    start            if True, lon0 represents the starting longitude
                     of the new grid. if False, lon0 is the ending
                     longitude. Default True.
    cyclic           width of periodic domain (default 360)
    ==============   ====================================================
    returns ``dataout,lonsout`` (data and longitudes on shifted grid).
    """
    # FIX: the docstring above must be the first statement to become the
    # function's __doc__; in the original it followed the import and was a
    # no-op string expression.
    import numpy as np
    if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
        # Grid does not include the cyclic (duplicate) end point: keep all
        # columns when re-assembling (deliberate deviation from the basemap
        # original, which raised ValueError here).
        start_idx = 0
    else:
        # Cyclic point included: skip the duplicated first column.
        start_idx = 1
    if lon0 < lonsin[0] or lon0 > lonsin[-1]:
        raise ValueError('lon0 outside of range of lonsin')
    # index of the pivot longitude and how many columns move to the front
    i0 = np.argmin(np.fabs(lonsin-lon0))
    i0_shift = len(lonsin)-i0
    # preserve masked-array-ness of the inputs in the outputs
    if np.ma.isMA(datain):
        dataout = np.ma.zeros(datain.shape,datain.dtype)
    else:
        dataout = np.zeros(datain.shape,datain.dtype)
    if np.ma.isMA(lonsin):
        lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
    else:
        lonsout = np.zeros(lonsin.shape,lonsin.dtype)
    # front part: columns from the pivot to the end
    if start:
        lonsout[0:i0_shift] = lonsin[i0:]
    else:
        lonsout[0:i0_shift] = lonsin[i0:]-cyclic
    dataout[...,0:i0_shift] = datain[...,i0:]
    # back part: wrapped-around columns from the start of the input grid
    if start:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
    else:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
    dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
    return dataout,lonsout
def bounds_to_extent( bounds ):
    '''
    Convert a rasterio-style (left, bottom, right, top) bounds tuple into a
    closed ring of corner coordinates, starting and ending at (left, bottom).
    '''
    left, bottom, right, top = bounds
    ring = [
        (left, bottom),
        (right, bottom),
        (right, top),
        (left, top),
    ]
    ring.append(ring[0])  # close the ring
    return ring
def padded_bounds( rst, npixels, crs ):
    '''
    Expand (or shrink) a raster's bounds by a number of pixels in each
    direction.
    ARGUMENTS:
    ----------
    rst = opened rasterio raster object; only its ``.res`` and ``.bounds``
        attributes are used.
    npixels = tuple of 4 signed pixel counts (left(-), bottom(-), right(+),
        top(+)). e.g. (-5, -5, 5, 5) expands by 5 pixels in every direction,
        (0, 0, 5, 5) only to the right and top.
    crs = unused; kept for backward compatibility with existing callers.
    RETURNS:
    --------
    list of 4 floats [left, bottom, right, top] of the padded bounds.
    NOTE: assumes square pixels -- only rst.res[0] is used for both axes.
    '''
    # FIX: removed unused local imports (rasterio, os, sys, shapely Polygon)
    # and corrected the docstring, which described a different signature.
    resolution = rst.res[0]
    return [ bound + (expand * resolution) for bound, expand in zip( rst.bounds, npixels ) ]
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
    '''
    Interpolate scattered points onto a grid; a thin wrapper around
    scipy.interpolate.griddata. Points and grid must share one coordinate
    system.
    x, y, z = 1-D np.arrays of equal length (coordinates and values)
    grid = (xi, yi) tuple as produced by numpy.meshgrid()
    method = griddata method: 'cubic', 'nearest', or 'linear'
    output_dtype = dtype of the returned array (default np.float32)
    Returns the interpolated grid flipped upside-down (north-up raster order).
    '''
    import numpy as np
    from scipy.interpolate import griddata
    interpolated = griddata( (x, y), z, grid, method=method )
    return np.flipud( interpolated.astype( output_dtype ) )
def run( df, meshgrid_tuple, lons_pcll, template_raster_fn, src_transform, src_crs, src_nodata, output_filename ):
    '''
    run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
    extent, resolution, origin (template_raster).
    This function is intended to be used to run a pathos.multiprocessing Pool's map function
    across a list of pre-computed arguments.
    RETURNS:
    [str] path to the output filename generated
    '''
    template_raster = rasterio.open( template_raster_fn )
    # cubic-interpolate the point anomalies ('lon'/'lat'/'anom' columns) onto
    # the pacific-centered meshgrid
    interp_arr = xyz_to_grid( np.array(df['lon'].tolist()), \
            np.array(df['lat'].tolist()), \
            np.array(df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
    # NOTE(review): the src_nodata *parameter* is unconditionally overwritten
    # here, so the caller-supplied value is never used -- confirm intended.
    src_nodata = -9999.0 # nodata
    interp_arr[ np.isnan( interp_arr ) ] = src_nodata
    # rotate from pacific-centered back to greenwich-centered longitudes
    dat, lons = shiftgrid( 180., interp_arr, lons_pcll, start=False )
    output_arr = np.empty_like( template_raster.read( 1 ) )
    template_meta = template_raster.meta
    # drop the deprecated 'transform' key so rasterio.open(**meta) accepts it
    if 'transform' in template_meta.keys():
        template_meta.pop( 'transform' )
    template_meta.update( crs={'init':'epsg:3338'} )
    # NOTE(review): template_meta['affine'] exists only in rasterio < 1.0;
    # RESAMPLING was renamed Resampling in rasterio 1.0 as well.
    reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
        dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
        dst_nodata=None, resampling=RESAMPLING.nearest, num_threads=1, SOURCE_EXTRA=1000 )
    return write_gtiff( output_arr, template_meta, output_filename, compress=True )
if __name__ == '__main__':
    # Driver: downscale CRU TS3.1 cloud-cover anomalies onto an Alaska/Canada
    # template grid, one GeoTiff per month/year, in parallel via pathos.
    import rasterio, xray, os, glob, affine
    from rasterio.warp import reproject, RESAMPLING
    import geopandas as gpd
    import pandas as pd
    import numpy as np
    from collections import OrderedDict
    from shapely.geometry import Point
    from pathos import multiprocessing as mp
    ncores = 15
    # filenames
    base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data'
    cld_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.cld.dat.nc'
    template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
    output_path = os.path.join( base_path, 'OCTOBER' )
    # this is the set of modified GTiffs produced in the conversion procedure with the ts2.0 data
    cld_ts20 = '' # read in the already pre-produced files. They should be in 10'... or maybe I need to change that.
    climatology_begin = '1961'
    climatology_end = '1990'
    year_begin = 1901
    year_end = 2009
    # open with xray
    cld_ts31 = xray.open_dataset( cld_ts31 )
    # open template raster
    template_raster = rasterio.open( template_raster_fn )
    template_meta = template_raster.meta
    template_meta.update( crs={'init':'epsg:3338'} )
    # calculate the anomalies
    clim_ds = cld_ts31.loc[ {'time':slice(climatology_begin,climatology_end)} ]
    climatology = clim_ds.cld.groupby( 'time.month' ).mean( 'time' )
    # ratio anomalies: monthly value divided by its 1961-1990 climatology
    anomalies = cld_ts31.cld.groupby( 'time.month' ) / climatology
    # rotate the anomalies to pacific centered latlong -- this is already in the greenwich latlong
    dat_pcll, lons_pcll = shiftgrid( 0., anomalies, anomalies.lon.data )
    # # generate an expanded extent (from the template_raster) to interpolate across
    template_raster = rasterio.open( template_raster_fn )
    # output_resolution = (1000.0, 1000.0) # hardwired, but we are building this for IEM which requires 1km
    template_meta = template_raster.meta
    # # interpolate to a new grid
    # get longitudes and latitudes using meshgrid
    lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ] # mesh the lons/lats
    # convert into GeoDataFrame and drop all the NaNs
    # one DataFrame per monthly time slice of the pacific-centered anomalies
    df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in dat_pcll ]
    xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
    # meshgrid_tuple = np.meshgrid( lons_pcll, anomalies.lat.data )
    # argument setup
    # source grid: 0.5-degree global grid with origin at (-180, 90)
    src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
    src_crs = {'init':'epsg:4326'}
    src_nodata = -9999.0
    # output_filenames setup
    years = np.arange( year_begin, year_end+1, 1 ).astype( str ).tolist()
    months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
    month_year = [ (month, year) for year in years for month in months ]
    output_filenames = [ os.path.join( output_path, '_'.join([ 'cld_pct_cru_ts31',month,year])+'.tif' ) for month, year in month_year ]
    # build a list of keyword args to pass to the pool of workers.
    args_list = [ {'df':df, 'meshgrid_tuple':(xi, yi), 'lons_pcll':lons_pcll, \
                'template_raster_fn':template_raster_fn, 'src_transform':src_transform, \
                'src_crs':src_crs, 'src_nodata':src_nodata, 'output_filename':fn } for df, fn in zip( df_list, output_filenames ) ]
    # interpolate / reproject / resample
    # NOTE: mapping a lambda over a pool works here because pathos serializes
    # with dill; the stdlib multiprocessing Pool could not pickle a lambda.
    pool = mp.Pool( processes=ncores )
    out = pool.map( lambda args: run( **args ), args_list )
    pool.close()
    # To Complete the CRU TS3.1 Downscaling we need the following:
    # [1] DOWNSCALE WITH THE CRU CL2.0 Calculated Cloud Climatology from Sunshine Percent
    # [2] Mask the data
    # [3] give proper naming convention
    # [4] output to GTiff
| mit |
crystalrood/piggie | public/test_scripts/updating_status_mongo.py | 1 | 1334 | # this test file takes an encoded emai, decodes it, parese out order information and saves it to the database
# also changes the status of the messages db from "need to scrape" to "scraped"
#
#
#
#required encoding for scraping, otherwise defaults to unicode and screws things up
from bs4 import BeautifulSoup
import requests
import sys;
import json;
#reload(sys);
#sys.setdefaultencoding("utf8")
import re
import pandas as pd
import pprint
import numpy as np
import csv, sys
import base64
import datefinder
import pymongo
from pymongo import MongoClient
# Module-level MongoDB connection shared by main().
# SECURITY NOTE(review): database credentials are hardcoded in the URI below;
# they should be moved to an environment variable and these credentials rotated.
uri = 'mongodb://heroku_4jtg3rvf:r9nq5ealpnfrlda5la4fj8r192@ds161503.mlab.com:61503/heroku_4jtg3rvf'
client = MongoClient(uri)
db = client['heroku_4jtg3rvf']
#client = MongoClient('mongodb://localhost:27017/test')
#db = client.test
#Read data from stdin
def read_in():
    """Consume all of stdin and parse the first line as JSON."""
    raw_lines = sys.stdin.readlines()
    # input is expected to carry exactly one JSON document on its first line
    first_line = raw_lines[0]
    return json.loads(first_line)
def main():
    """Read [order_num, item_name] from stdin and mark the matching scrape
    records in MongoDB. Uses the module-level ``db`` connection."""
    #get our data as an array from read_in()
    lines = read_in()
    print(lines[0] + lines[1])
    # NOTE(review): the file header says status goes "need to scrape" ->
    # "scraped", but the value written here is "contacted" -- confirm intended.
    db.order_info_item_scrapes.update_many(
        {"order_num": lines[0], "item_name": lines[1]},
        {"$set": {"status": "contacted"}}
    )
    print('MIMZEY did it')
# Start process
if __name__ == '__main__':
    main()
| mit |
great-expectations/great_expectations | tests/core/test_expectation_suite.py | 1 | 17469 | import datetime
from copy import copy, deepcopy
from typing import Any, Dict, List
import pytest
from ruamel.yaml import YAML
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.util import filter_properties_dict
# --- ExpectationConfiguration fixtures --------------------------------------
# exp1..exp4 are column-level "values in set" expectations. exp2 and exp3 are
# deliberately identical (used to test equality across instances); exp4
# differs from them only in value_set.
@pytest.fixture
def exp1():
    return ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={"column": "a", "value_set": [1, 2, 3], "result_format": "BASIC"},
        meta={"notes": "This is an expectation."},
    )
@pytest.fixture
def exp2():
    return ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={"column": "b", "value_set": [-1, -2, -3], "result_format": "BASIC"},
        meta={"notes": "This is an expectation."},
    )
@pytest.fixture
def exp3():
    # identical to exp2 -- a distinct instance with equal content
    return ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={"column": "b", "value_set": [-1, -2, -3], "result_format": "BASIC"},
        meta={"notes": "This is an expectation."},
    )
@pytest.fixture
def exp4():
    # same column as exp2/exp3 but a different value_set
    return ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={"column": "b", "value_set": [1, 2, 3], "result_format": "BASIC"},
        meta={"notes": "This is an expectation."},
    )
@pytest.fixture
def column_pair_expectation():
    return ExpectationConfiguration(
        expectation_type="expect_column_pair_values_to_be_in_set",
        kwargs={
            "column_A": "1",
            "column_B": "b",
            "value_set": [(1, 1), (2, 2)],
            "result_format": "BASIC",
        },
    )
# table-level (non-column) expectations, used to test table/column filtering
@pytest.fixture
def table_exp1():
    return ExpectationConfiguration(
        expectation_type="expect_table_columns_to_match_ordered_list",
        kwargs={"value": ["a", "b", "c"]},
    )
@pytest.fixture
def table_exp2():
    return ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_be_between",
        kwargs={"min_value": 0, "max_value": 1},
    )
@pytest.fixture
def table_exp3():
    return ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal", kwargs={"value": 1}
    )
# --- ExpectationSuite fixtures ----------------------------------------------
# Suites built from the expectation fixtures above; "identical" shares content
# with "baseline", "equivalent" differs only in name/meta, "different" differs
# in one expectation's value_set.
@pytest.fixture
def empty_suite():
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def single_expectation_suite(exp1):
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[exp1],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def suite_with_table_and_column_expectations(
    exp1, exp2, exp3, exp4, column_pair_expectation, table_exp1, table_exp2, table_exp3
):
    suite = ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[
            exp1,
            exp2,
            exp3,
            exp4,
            column_pair_expectation,
            table_exp1,
            table_exp2,
            table_exp3,
        ],
        meta={"notes": "This is an expectation suite."},
    )
    # sanity check: construction must preserve the expectation order
    assert suite.expectations == [
        exp1,
        exp2,
        exp3,
        exp4,
        column_pair_expectation,
        table_exp1,
        table_exp2,
        table_exp3,
    ]
    return suite
@pytest.fixture
def baseline_suite(exp1, exp2):
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[exp1, exp2],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def identical_suite(exp1, exp3):
    # same content as baseline_suite (exp3 == exp2), different instance
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[exp1, exp3],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def equivalent_suite(exp1, exp3):
    # same expectations as baseline_suite but different name and meta
    return ExpectationSuite(
        expectation_suite_name="danger",
        expectations=[exp1, exp3],
        meta={
            "notes": "This is another expectation suite, with a different name and meta"
        },
    )
@pytest.fixture
def different_suite(exp1, exp4):
    # differs from baseline_suite in one expectation's value_set (exp4 vs exp2)
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[exp1, exp4],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def profiler_config():
    # Profiler configuration is pulled from the Bobster use case in tests/rule_based_profiler/
    # Returned as a ruamel-parsed dict; used only as an opaque payload in the
    # citation tests below.
    yaml_config = """
    # This profiler is meant to be used on the NYC taxi data:
    # tests/test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_20(18|19|20)-*.csv
    variables:
      # BatchRequest yielding thirty five (35) batches (January, 2018 -- November, 2020 trip data)
      jan_2018_thru_nov_2020_monthly_trip_data_batch_request:
        datasource_name: taxi_pandas
        data_connector_name: monthly
        data_asset_name: my_reports
        data_connector_query:
          index: ":-1"
    confidence_level: 9.5e-1
    mostly: 1.0
    rules:
      row_count_range_rule:
        domain_builder:
          class_name: TableDomainBuilder
        parameter_builders:
          - parameter_name: row_count_range
            class_name: NumericMetricRangeMultiBatchParameterBuilder
            batch_request: $variables.jan_2018_thru_nov_2020_monthly_trip_data_batch_request
            metric_name: table.row_count
            confidence_level: $variables.confidence_level
            round_decimals: 0
            truncate_values:
              lower_bound: 0
        expectation_configuration_builders:
          - expectation_type: expect_table_row_count_to_be_between
            class_name: DefaultExpectationConfigurationBuilder
            module_name: great_expectations.rule_based_profiler.expectation_configuration_builder
            min_value: $parameter.row_count_range.value.min_value
            max_value: $parameter.row_count_range.value.max_value
            mostly: $variables.mostly
            meta:
              profiler_details: $parameter.row_count_range.details
    """
    yaml = YAML()
    return yaml.load(yaml_config)
def test_expectation_suite_equality(baseline_suite, identical_suite, equivalent_suite):
    """Equality should depend on all defined properties of a configuration object, but not on whether the *instances*
    are the same."""
    assert baseline_suite is baseline_suite  # no difference
    assert (
        baseline_suite is not identical_suite
    )  # different instances, but same content
    assert baseline_suite == identical_suite  # different instances, but same content
    assert not (baseline_suite != identical_suite)  # ne works properly
    assert not (baseline_suite == equivalent_suite)  # different meta
    assert baseline_suite != equivalent_suite  # ne works properly
def test_expectation_suite_equivalence(
    baseline_suite,
    identical_suite,
    equivalent_suite,
    different_suite,
    single_expectation_suite,
):
    """Equivalence should depend only on properties that affect the result of the expectation."""
    # equivalence ignores name/meta but is sensitive to expectation content/count
    assert baseline_suite.isEquivalentTo(baseline_suite)  # no difference
    assert baseline_suite.isEquivalentTo(identical_suite)
    assert baseline_suite.isEquivalentTo(equivalent_suite)  # different meta
    assert not baseline_suite.isEquivalentTo(
        different_suite
    )  # different value_set in one expectation
    assert not single_expectation_suite.isEquivalentTo(baseline_suite)
def test_expectation_suite_dictionary_equivalence(baseline_suite):
    # isEquivalentTo must also accept a plain dict representation of a suite
    assert (
        baseline_suite.isEquivalentTo(
            {
                "expectation_suite_name": "warning",
                "expectations": [
                    {
                        "expectation_type": "expect_column_values_to_be_in_set",
                        "kwargs": {
                            "column": "a",
                            "value_set": [1, 2, 3],
                            "result_format": "BASIC",
                        },
                        "meta": {"notes": "This is an expectation."},
                    },
                    {
                        "expectation_type": "expect_column_values_to_be_in_set",
                        "kwargs": {
                            "column": "b",
                            "value_set": [-1, -2, -3],
                            "result_format": "BASIC",
                        },
                        "meta": {"notes": "This is an expectation."},
                    },
                ],
                "meta": {"notes": "This is an expectation suite."},
            }
        )
        is True
    )
    assert (
        baseline_suite.isEquivalentTo(
            {
                "expectation_suite_name": "warning",
                "expectations": [
                    {
                        "expectation_type": "expect_column_values_to_be_in_set",
                        "kwargs": {
                            "column": "a",
                            "value_set": [-1, 2, 3],  # One value changed here
                            "result_format": "BASIC",
                        },
                        "meta": {"notes": "This is an expectation."},
                    },
                    {
                        "expectation_type": "expect_column_values_to_be_in_set",
                        "kwargs": {
                            "column": "b",
                            "value_set": [-1, -2, -3],
                            "result_format": "BASIC",
                        },
                        "meta": {"notes": "This is an expectation."},
                    },
                ],
                "meta": {"notes": "This is an expectation suite."},
            }
        )
        is False
    )
def test_expectation_suite_copy(baseline_suite):
    # shallow copy: top-level attributes are independent, nested ones shared
    suite_copy = copy(baseline_suite)
    assert suite_copy == baseline_suite
    suite_copy.data_asset_type = "blarg!"
    # NOTE(review): "blarg" != "blarg!" -- this assertion is vacuously true
    # regardless of copy semantics; likely a typo for "blarg!".
    assert (
        baseline_suite.data_asset_type != "blarg"
    )  # copy on primitive properties shouldn't propagate
    suite_copy.expectations[0].meta["notes"] = "a different note"
    assert (
        baseline_suite.expectations[0].meta["notes"] == "a different note"
    )  # copy on deep attributes does propagate
def test_expectation_suite_deepcopy(baseline_suite):
    # deep copy: mutations of nested attributes must NOT propagate back
    suite_deepcopy = deepcopy(baseline_suite)
    assert suite_deepcopy == baseline_suite
    suite_deepcopy.data_asset_type = "blarg!"
    assert (
        baseline_suite.data_asset_type != "blarg"
    )  # copy on primitive properties shouldn't propagate
    suite_deepcopy.expectations[0].meta["notes"] = "a different note"
    # deepcopy on deep attributes does not propagate
    assert baseline_suite.expectations[0].meta["notes"] == "This is an expectation."
# Suites must stamp the library version into meta, without clobbering an
# explicitly supplied value.
def test_suite_without_metadata_includes_ge_version_metadata_if_none_is_provided():
    suite = ExpectationSuite("foo")
    assert "great_expectations_version" in suite.meta.keys()
def test_suite_does_not_overwrite_existing_version_metadata():
    suite = ExpectationSuite("foo", meta={"great_expectations_version": "0.0.0"})
    assert "great_expectations_version" in suite.meta.keys()
    assert suite.meta["great_expectations_version"] == "0.0.0"
def test_suite_with_metadata_includes_ge_version_metadata(baseline_suite):
    assert "great_expectations_version" in baseline_suite.meta.keys()
def test_add_citation(baseline_suite):
    # add_citation appends to meta["citations"], creating the list on demand
    assert (
        "citations" not in baseline_suite.meta
        or len(baseline_suite.meta["citations"]) == 0
    )
    baseline_suite.add_citation("hello!")
    assert baseline_suite.meta["citations"][0].get("comment") == "hello!"
def test_add_citation_with_profiler_config(baseline_suite, profiler_config):
    # the optional profiler_config payload is stored verbatim on the citation
    assert (
        "citations" not in baseline_suite.meta
        or len(baseline_suite.meta["citations"]) == 0
    )
    baseline_suite.add_citation(
        "adding profiler config citation",
        profiler_config=profiler_config,
    )
    assert baseline_suite.meta["citations"][0].get("profiler_config") == profiler_config
def test_get_citations_with_no_citations(baseline_suite):
    assert "citations" not in baseline_suite.meta
    assert baseline_suite.get_citations() == []
def test_get_citations_not_sorted(baseline_suite):
    # sort=False must preserve insertion order
    assert "citations" not in baseline_suite.meta
    baseline_suite.add_citation("first", citation_date="2000-01-01")
    baseline_suite.add_citation("third", citation_date="2000-01-03")
    baseline_suite.add_citation("second", citation_date="2000-01-02")
    properties_dict_list: List[Dict[str, Any]] = baseline_suite.get_citations(
        sort=False
    )
    # strip falsy/incidental keys so the comparison is on citation content only
    for properties_dict in properties_dict_list:
        filter_properties_dict(
            properties=properties_dict, clean_falsy=True, inplace=True
        )
        properties_dict.pop("interactive", None)
    assert properties_dict_list == [
        {"citation_date": "2000-01-01T00:00:00.000000Z", "comment": "first"},
        {"citation_date": "2000-01-03T00:00:00.000000Z", "comment": "third"},
        {"citation_date": "2000-01-02T00:00:00.000000Z", "comment": "second"},
    ]
def test_get_citations_sorted(baseline_suite):
    # sort=True must order citations by citation_date ascending
    assert "citations" not in baseline_suite.meta
    dt: datetime.datetime  # NOTE(review): unused annotation-only statement
    baseline_suite.add_citation("first", citation_date="2000-01-01")
    baseline_suite.add_citation("third", citation_date="2000-01-03")
    baseline_suite.add_citation("second", citation_date="2000-01-02")
    properties_dict_list: List[Dict[str, Any]] = baseline_suite.get_citations(sort=True)
    for properties_dict in properties_dict_list:
        filter_properties_dict(
            properties=properties_dict, clean_falsy=True, inplace=True
        )
        properties_dict.pop("interactive", None)
    assert properties_dict_list == [
        {
            "citation_date": "2000-01-01T00:00:00.000000Z",
            "comment": "first",
        },
        {
            "citation_date": "2000-01-02T00:00:00.000000Z",
            "comment": "second",
        },
        {
            "citation_date": "2000-01-03T00:00:00.000000Z",
            "comment": "third",
        },
    ]
def test_get_citations_with_multiple_citations_containing_batch_kwargs(baseline_suite):
    # require_batch_kwargs=True filters out citations lacking batch_kwargs
    assert "citations" not in baseline_suite.meta
    baseline_suite.add_citation(
        "first", batch_kwargs={"path": "first"}, citation_date="2000-01-01"
    )
    baseline_suite.add_citation(
        "second", batch_kwargs={"path": "second"}, citation_date="2001-01-01"
    )
    baseline_suite.add_citation("third", citation_date="2002-01-01")
    properties_dict_list: List[Dict[str, Any]] = baseline_suite.get_citations(
        sort=True, require_batch_kwargs=True
    )
    for properties_dict in properties_dict_list:
        filter_properties_dict(
            properties=properties_dict, clean_falsy=True, inplace=True
        )
        properties_dict.pop("interactive", None)
    assert properties_dict_list == [
        {
            "citation_date": "2000-01-01T00:00:00.000000Z",
            "batch_kwargs": {"path": "first"},
            "comment": "first",
        },
        {
            "citation_date": "2001-01-01T00:00:00.000000Z",
            "batch_kwargs": {"path": "second"},
            "comment": "second",
        },
    ]
def test_get_citations_with_multiple_citations_containing_profiler_config(
    baseline_suite, profiler_config
):
    # require_profiler_config=True filters out citations lacking a profiler_config
    assert "citations" not in baseline_suite.meta
    baseline_suite.add_citation(
        "first",
        citation_date="2000-01-01",
        profiler_config=profiler_config,
    )
    baseline_suite.add_citation(
        "second",
        citation_date="2001-01-01",
        profiler_config=profiler_config,
    )
    baseline_suite.add_citation("third", citation_date="2002-01-01")
    properties_dict_list: List[Dict[str, Any]] = baseline_suite.get_citations(
        sort=True, require_profiler_config=True
    )
    for properties_dict in properties_dict_list:
        filter_properties_dict(
            properties=properties_dict, clean_falsy=True, inplace=True
        )
        properties_dict.pop("interactive", None)
    assert properties_dict_list == [
        {
            "citation_date": "2000-01-01T00:00:00.000000Z",
            "profiler_config": profiler_config,
            "comment": "first",
        },
        {
            "citation_date": "2001-01-01T00:00:00.000000Z",
            "profiler_config": profiler_config,
            "comment": "second",
        },
    ]
# get_table_expectations / get_column_expectations must partition a mixed
# suite correctly and return [] when nothing matches.
def test_get_table_expectations_returns_empty_list_on_empty_suite(empty_suite):
    assert empty_suite.get_table_expectations() == []
def test_get_table_expectations_returns_empty_list_on_suite_without_any(baseline_suite):
    assert baseline_suite.get_table_expectations() == []
def test_get_table_expectations(
    suite_with_table_and_column_expectations, table_exp1, table_exp2, table_exp3
):
    obs = suite_with_table_and_column_expectations.get_table_expectations()
    assert obs == [table_exp1, table_exp2, table_exp3]
def test_get_column_expectations_returns_empty_list_on_empty_suite(empty_suite):
    assert empty_suite.get_column_expectations() == []
def test_get_column_expectations(
    suite_with_table_and_column_expectations, exp1, exp2, exp3, exp4
):
    obs = suite_with_table_and_column_expectations.get_column_expectations()
    assert obs == [exp1, exp2, exp3, exp4]
| apache-2.0 |
carefree0910/MachineLearning | b_NaiveBayes/Original/GaussianNB.py | 1 | 4093 | import os
import sys
root_path = os.path.abspath("../../")
if root_path not in sys.path:
sys.path.append(root_path)
import matplotlib.pyplot as plt
from b_NaiveBayes.Original.Basic import *
from b_NaiveBayes.Original.MultinomialNB import MultinomialNB
from Util.Util import DataUtil
class GaussianNB(NaiveBayes):
    """Gaussian Naive Bayes: continuous features modelled per-class with
    per-dimension Gaussian maximum-likelihood estimates.
    Relies on protected state (self._x, self._labelled_x, self._cat_counter,
    ...) consumed by the NaiveBayes base class."""
    GaussianNBTiming = Timing()
    @GaussianNBTiming.timeit(level=1, prefix="[API] ")
    def feed_data(self, x, y, sample_weight=None):
        """Ingest training data: cast features to float, encode labels as
        0..k-1 ints, and split samples by class for later per-class fits."""
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        # every feature value is forced to float (continuous features only)
        x = np.array([list(map(lambda c: float(c), sample)) for sample in x])
        labels = list(set(y))
        label_dict = {label: i for i, label in enumerate(labels)}
        y = np.array([label_dict[yy] for yy in y])
        cat_counter = np.bincount(y)
        # boolean masks, one per class, reusing the name 'labels'
        labels = [y == value for value in range(len(cat_counter))]
        # per-class feature matrices, transposed to (dim, n_samples_in_class)
        labelled_x = [x[label].T for label in labels]
        self._x, self._y = x.T, y
        self._labelled_x, self._label_zip = labelled_x, labels
        self._cat_counter, self.label_dict = cat_counter, {i: l for l, i in label_dict.items()}
        self.feed_sample_weight(sample_weight)
    @GaussianNBTiming.timeit(level=1, prefix="[Core] ")
    def feed_sample_weight(self, sample_weight=None):
        """Scale each class's feature matrix in place by the (rescaled)
        per-sample weights; no-op when sample_weight is None."""
        if sample_weight is not None:
            local_weights = sample_weight * len(sample_weight)
            for i, label in enumerate(self._label_zip):
                self._labelled_x[i] *= local_weights[label]
    @GaussianNBTiming.timeit(level=1, prefix="[Core] ")
    def _fit(self, lb):
        """Fit per-dimension Gaussians for every class and return a function
        computing the (unnormalized) joint likelihood of x under a class."""
        n_category = len(self._cat_counter)
        p_category = self.get_prior_probability(lb)
        # data[dim][class] is a callable Gaussian density for that dim/class
        data = [
            NBFunctions.gaussian_maximum_likelihood(
                self._labelled_x, n_category, dim) for dim in range(len(self._x))]
        self._data = data
        def func(input_x, tar_category):
            # naive independence assumption: product of per-dim densities
            rs = 1
            for d, xx in enumerate(input_x):
                rs *= data[d][tar_category](xx)
            return rs * p_category[tar_category]
        return func
    def visualize(self, save=False):
        """Plot (or save, one PNG per dimension) the fitted per-class
        Gaussian densities over each feature's observed range."""
        colors = plt.cm.Paired([i / len(self.label_dict) for i in range(len(self.label_dict))])
        colors = {cat: color for cat, color in zip(self.label_dict.values(), colors)}
        for j in range(len(self._x)):
            tmp_data = self._x[j]
            x_min, x_max = np.min(tmp_data), np.max(tmp_data)
            gap = x_max - x_min
            tmp_x = np.linspace(x_min-0.1*gap, x_max+0.1*gap, 200)
            title = "$j = {}$".format(j + 1)
            plt.figure()
            plt.title(title)
            for c in range(len(self.label_dict)):
                plt.plot(tmp_x, [self._data[j][c](xx) for xx in tmp_x],
                         c=colors[self.label_dict[c]], label="class: {}".format(self.label_dict[c]))
            plt.xlim(x_min-0.2*gap, x_max+0.2*gap)
            plt.legend()
            if not save:
                plt.show()
            else:
                plt.savefig("d{}".format(j + 1))
if __name__ == '__main__':
    # Demo/benchmark: fit GaussianNB on the mushroom dataset, reusing
    # MultinomialNB only to load and label-encode the raw data.
    import time
    xs, ys = DataUtil.get_dataset("mushroom", "../../_Data/mushroom.txt", tar_idx=0)
    nb = MultinomialNB()
    nb.feed_data(xs, ys)
    xs, ys = nb["x"].tolist(), nb["y"].tolist()
    # simple head/tail split; data order is assumed already shuffled
    train_num = 6000
    x_train, x_test = xs[:train_num], xs[train_num:]
    y_train, y_test = ys[:train_num], ys[train_num:]
    learning_time = time.time()
    nb = GaussianNB()
    nb.fit(x_train, y_train)
    learning_time = time.time() - learning_time
    estimation_time = time.time()
    nb.evaluate(x_train, y_train)
    nb.evaluate(x_test, y_test)
    estimation_time = time.time() - estimation_time
    print(
        "Model building  : {:12.6} s\n"
        "Estimation      : {:12.6} s\n"
        "Total           : {:12.6} s".format(
            learning_time, estimation_time,
            learning_time + estimation_time
        )
    )
    nb.show_timing_log()
    nb.visualize()
| mit |
mindw/shapely | docs/code/polygon2.py | 6 | 1798 | from matplotlib import pyplot
from matplotlib.patches import Circle
from shapely.geometry import Polygon
from descartes.patch import PolygonPatch
from figures import SIZE
# Fill colors keyed by geometry validity: blue for valid, red for invalid.
COLOR = {
    True: '#6699cc',
    False: '#ff3333',
}

def v_color(ob):
    """Return the plot color for *ob* based on its ``is_valid`` flag."""
    is_valid = ob.is_valid
    return COLOR[is_valid]

def plot_coords(ax, ob):
    """Draw the coordinate points of *ob* as small grey dots on *ax*."""
    xs, ys = ob.xy
    ax.plot(xs, ys, 'o', color='#999999', zorder=1)
# Documentation figure: two side-by-side examples of invalid polygons.
# NOTE(review): Python 2 only -- `range(*xrange) + [xrange[-1]]` concatenates
# a list; under Python 3 range() is not a list and this raises TypeError.
# Also, the locals `int` and `xrange` shadow builtins.
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
# 3: invalid polygon, ring touch along a line
ax = fig.add_subplot(121)
ext = [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]
int = [(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1), (0.5, 0)]
polygon = Polygon(ext, [int])
plot_coords(ax, polygon.interiors[0])
plot_coords(ax, polygon.exterior)
patch = PolygonPatch(polygon, facecolor=v_color(polygon), edgecolor=v_color(polygon), alpha=0.5, zorder=2)
ax.add_patch(patch)
ax.set_title('c) invalid')
xrange = [-1, 3]
yrange = [-1, 3]
ax.set_xlim(*xrange)
ax.set_xticks(range(*xrange) + [xrange[-1]])
ax.set_ylim(*yrange)
ax.set_yticks(range(*yrange) + [yrange[-1]])
ax.set_aspect(1)
#4: invalid self-touching ring
ax = fig.add_subplot(122)
ext = [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]
int_1 = [(0.5, 0.25), (1.5, 0.25), (1.5, 1.25), (0.5, 1.25), (0.5, 0.25)]
int_2 = [(0.5, 1.25), (1, 1.25), (1, 1.75), (0.5, 1.75)]
# int_2 = [
polygon = Polygon(ext, [int_1, int_2])
plot_coords(ax, polygon.interiors[0])
plot_coords(ax, polygon.interiors[1])
plot_coords(ax, polygon.exterior)
patch = PolygonPatch(polygon, facecolor=v_color(polygon), edgecolor=v_color(polygon), alpha=0.5, zorder=2)
ax.add_patch(patch)
ax.set_title('d) invalid')
xrange = [-1, 3]
yrange = [-1, 3]
ax.set_xlim(*xrange)
ax.set_xticks(range(*xrange) + [xrange[-1]])
ax.set_ylim(*yrange)
ax.set_yticks(range(*yrange) + [yrange[-1]])
ax.set_aspect(1)
pyplot.show()
| bsd-3-clause |
FireElementalNE/RetroColorAnalysis | scatterplots/scatter_plot.py | 1 | 1813 | import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import globals.global_values as global_values
class ScatterPlot:
    """Build and save a 3D scatter plot of a colour distribution."""
    def __init__(self, cl, _dirs, _is_agg):
        '''
        init a scatter plot
        :param cl: mapping of colour identifiers to 3-component colour values
                   (indexable as v[0], v[1], v[2])
        :param _dirs: the dirs structure; indices used here: [0] source image
                      path, [3] per-image output dir, [8] aggregate output dir
        :param _is_agg: True when plotting the aggregate of all images,
                        False for a single image
        '''
        self.color_list = cl
        self.dirs = _dirs
        self.is_agg = _is_agg
    def make_file_name(self):
        '''
        create the correct filename for the scatter plot
        :return: the filename
        '''
        if not self.is_agg:
            # "<source image basename>_scatter.png" in the per-image dir
            base_name = os.path.basename(self.dirs[0]).split('.')[0]
            d_name = '%s_scatter.png' % (base_name)
            return os.path.join(self.dirs[3], d_name)
        else:
            return os.path.join(self.dirs[8], 'agg_scatter.png')
    def make_scatter_plot(self):
        '''
        make a scatter plot from the given figures
        :return: nothing
        '''
        # TODO: make correct labels
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # FIX: removed dead r/g/b accumulator lists that were never used
        for k, v in self.color_list.iteritems():
            ax.scatter(v[0], v[1], v[2], c='r', marker='.')
        # axis labels depend on the configured colour-distance space
        if global_values.DISTANCE_TYPE == 'HSV':
            labels = global_values.HSV_SCATTER_LABELS
        elif global_values.DISTANCE_TYPE == 'LAB':
            labels = global_values.LAB_SCATTER_LABELS
        elif global_values.DISTANCE_TYPE == 'RGB':
            labels = global_values.RGB_SCATTER_LABELS
        else:
            labels = ['???', '???', '???']
        ax.set_xlabel(labels[0])
        ax.set_ylabel(labels[1])
        ax.set_zlabel(labels[2])
        plt.savefig(self.make_file_name())
        plt.close()
| gpl-2.0 |
trichter/sito | bin/noise/noise_s_final_autocorr1.py | 1 | 4932 | #!/usr/bin/env python
# by TR
from obspy.core import UTCDateTime as UTC
from sito.data import IPOC
from sito.noisexcorr import (prepare, get_correlations,
plotXcorrs, noisexcorrf, stack)
from sito import util
import matplotlib.pyplot as plt
from sito.stream import read
from multiprocessing import Pool
import time
from sito import seismometer
def main():
    """Plot daily noise (auto-)correlations for the IPOC seismic network.

    The full processing chain (preparation, correlation, stacking) is kept
    below as commented-out code so that individual steps can be re-enabled
    by hand; as committed, only the two ``plotXcorrs`` calls are executed.
    """
    # Station list: IPOC 'PBxx' stations plus a few additional stations.
    stations = 'PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 PB09 PB10 PB11 PB12 PB13 PB14 PB15 PB16 HMBCX MNMCX PATCX PSGCX LVC'
    stations2 = None  # no second station list -> correlate within `stations`
    components = 'Z'  # vertical component only
    # TOcopilla earthquake: 2007-11-14 15:14
    t1 = UTC('2006-02-01')  # start of the analysed period
    t2 = UTC('2012-10-01')  # end of the analysed period
    shift = 500  # max correlation lag (s); used by noisexcorrf below if enabled
    # only_auto=True -> restrict to auto-correlations (each station with itself)
    correlations = get_correlations(stations, components, stations2, only_auto=True)
    # Tag describing the processing chain; used in file paths and logger names.
    method = 'FINAL_filter0.01-0.5_1bit_auto'
    data = IPOC(xcorr_append='/' + method, use_local_LVC=False)
    data.setXLogger('_' + method)
#    pool = Pool()
#    prepare(data, stations.split(), t1, t2, component=components,
#            filter=(0.01, 0.5, 2, True), downsample=5,
#            eventremoval='waterlevel_env2', param_removal=(10, 0),
#            whitening=False,
#            normalize='1bit', param_norm=None,
#            pool=pool)
#    noisexcorrf(data, correlations, t1, t2, shift, pool=pool)
#    pool.close()
#    pool.join()
#    plotXcorrs(data, correlations, t1, t2, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
#               plot_stack=True, plot_psd=False, add_to_title='', downsample=None)
    # Overview plots of the daily correlations in the 0-200 s lag window.
    plotXcorrs(data, correlations, t1, t2, start=0, end=200, plot_overview=True, plot_years=False, use_dlognorm=False,
               plot_stack=True, plot_psd=False, add_to_title='', downsample=None, ext='_hg.png', vmax=0.1)
#    stack(data, correlations, dt= -1)
#    stack(data, correlations, dt=10 * 24 * 3600, shift=2 * 24 * 3600)
#    plotXcorrs(data, correlations, t1=None, t2=None, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
#               plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
#               stack=('10days', '2days'))
    # Same plots for the 10-day stacks shifted by 2 days.
    plotXcorrs(data, correlations, t1=None, t2=None, start=0, end=200, plot_overview=True, plot_years=False, use_dlognorm=False,
               plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
               stack=('10days', '2days'), ext='_hg.png', vmax=0.1)
#    util.checkDir(data.getPlotX(('', ''), t1))
    #for correlation in correlations:
    #    stations = correlation[0][:-1], correlation[1][:-1]
    #    dist = data.stations.dist(*stations)
    ##    if dist >= 120:
    ##        t = (dist // 100) * 50 + 50
    ##    else:
    ##        t = 70
    #    t = 200
    #    stream = data.readDayXcorr(correlation, t1, t2)
    #    if len(stream) > 0:
    #        stream.plotXcorr(-t, t, imshow=True, vmax=0.01, vmin_rel='vmax',
    #                         fig=plt.figure(figsize=(8.267, 11.693)),
    #                         figtitle='station ' + method + ' around Tocopilla event',
    #                         dateformatter='%y-%m-%d', show=False,
    #                         save=data.getPlotX(correlation, 'Tocopilla_0.01.png'),
    #                         stack_lim=None)
#
#    method = 'rm5_filter0.1-1'
#    data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
#    data.setXLogger('_' + method)
#    prepare(data, stations.split(' '), t1, t2, filter=(0.1, 1.), downsample=10,
#            component=components, normalize='runningmean', norm_param=5 * 10 + 1,
#            use_floating_stream=True)
#    xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
#    plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
#
#
#    method = 'rm50_filter0.01'
#    data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
#    data.setXLogger('_' + method)
#    prepare(data, stations.split(' '), t1, t2, filter=(0.01, None), downsample=None,
#            component=components, normalize='runningmean', norm_param=50 * 100 + 1,
#            use_floating_stream=True)
#    xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
#    plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
#
#
#    method = 'rm0.25_filter2'
#    data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
#    data.setXLogger('_' + method)
#    prepare(data, stations.split(' '), t1, t2, filter=(2, None), downsample=None,
#            component=components, normalize='runningmean', norm_param=100 // 4 + 1,
#            use_floating_stream=True)
#    xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
#    plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)


if __name__ == '__main__':
    main()
| mit |
mjgrav2001/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.tree import DecisionTreeRegressor

# Build a toy dataset: a single feature X mapped to two noisy targets that
# parameterize a circle of radius pi (y = [pi*sin(X), pi*cos(X)]).
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))

# Fit one regressor per depth (deeper trees can overfit the noise).
depth_color_pairs = [(2, "g"), (5, "r"), (8, "b")]
regressors = [DecisionTreeRegressor(max_depth=depth).fit(X, y)
              for depth, _ in depth_color_pairs]

# Predict on a dense grid of the underlying feature.
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
predictions = [regressor.predict(X_test) for regressor in regressors]

# Plot the noisy training targets and each model's prediction.
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
for (depth, color), y_pred in zip(depth_color_pairs, predictions):
    plt.scatter(y_pred[:, 0], y_pred[:, 1], c=color,
                label="max_depth=%d" % depth)
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/series/test_asof.py | 11 | 5289 | # coding=utf-8
import pytest
import numpy as np
from pandas import (offsets, Series, notna,
isna, date_range, Timestamp)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAsof(TestData):
    """Tests for ``Series.asof``: the last non-NaN value at or before a label."""

    def test_basic(self):
        # array or list or dates
        N = 50
        rng = date_range('1/1/1990', periods=N, freq='53s')
        ts = Series(np.random.randn(N), index=rng)
        ts[15:30] = np.nan  # carve out a NaN block to be carried forward over
        dates = date_range('1/1/1990', periods=N * 3, freq='25s')
        result = ts.asof(dates)
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]
        # Same query with a plain list of dates instead of a DatetimeIndex.
        result = ts.asof(list(dates))
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]
        # Inside the NaN block every value is carried forward from ts[lb].
        mask = (result.index >= lb) & (result.index < ub)
        rs = result[mask]
        assert (rs == ts[lb]).all()
        # The first query point at/after ub picks up the fresh value ts[ub].
        val = result[result.index[result.index >= ub][0]]
        assert ts[ub] == val

    def test_scalar(self):
        N = 30
        rng = date_range('1/1/1990', periods=N, freq='53s')
        ts = Series(np.arange(N), index=rng)
        ts[5:10] = np.NaN
        ts[15:20] = np.NaN
        # Lookups inside NaN runs resolve to the last valid observation.
        val1 = ts.asof(ts.index[7])
        val2 = ts.asof(ts.index[19])
        assert val1 == ts[4]
        assert val2 == ts[14]
        # accepts strings
        val1 = ts.asof(str(ts.index[7]))
        assert val1 == ts[4]
        # in there
        result = ts.asof(ts.index[3])
        assert result == ts[3]
        # no as of value
        d = ts.index[0] - offsets.BDay()
        assert np.isnan(ts.asof(d))

    def test_with_nan(self):
        # basic asof test
        rng = date_range('1/1/2000', '1/2/2000', freq='4h')
        s = Series(np.arange(len(rng)), index=rng)
        r = s.resample('2h').mean()
        result = r.asof(r.index)
        expected = Series([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.],
                          index=date_range('1/1/2000', '1/2/2000', freq='2h'))
        tm.assert_series_equal(result, expected)
        # Interior NaNs are forward-filled from the last valid value.
        r.iloc[3:5] = np.nan
        result = r.asof(r.index)
        expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.],
                          index=date_range('1/1/2000', '1/2/2000', freq='2h'))
        tm.assert_series_equal(result, expected)
        # Trailing NaNs are likewise filled from the last valid value.
        r.iloc[-3:] = np.nan
        result = r.asof(r.index)
        expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.],
                          index=date_range('1/1/2000', '1/2/2000', freq='2h'))
        tm.assert_series_equal(result, expected)

    def test_periodindex(self):
        from pandas import period_range, PeriodIndex
        # array or list or dates
        N = 50
        rng = period_range('1/1/1990', periods=N, freq='H')
        ts = Series(np.random.randn(N), index=rng)
        ts[15:30] = np.nan
        dates = date_range('1/1/1990', periods=N * 3, freq='37min')
        result = ts.asof(dates)
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]
        result = ts.asof(list(dates))
        assert notna(result).all()
        lb = ts.index[14]
        ub = ts.index[30]
        # Convert the result index back to periods to compare against labels.
        pix = PeriodIndex(result.index.values, freq='H')
        mask = (pix >= lb) & (pix < ub)
        rs = result[mask]
        assert (rs == ts[lb]).all()
        ts[5:10] = np.nan
        ts[15:20] = np.nan
        val1 = ts.asof(ts.index[7])
        val2 = ts.asof(ts.index[19])
        assert val1 == ts[4]
        assert val2 == ts[14]
        # accepts strings
        val1 = ts.asof(str(ts.index[7]))
        assert val1 == ts[4]
        # in there
        assert ts.asof(ts.index[3]) == ts[3]
        # no as of value
        d = ts.index[0].to_timestamp() - offsets.BDay()
        assert isna(ts.asof(d))

    def test_errors(self):
        s = Series([1, 2, 3],
                   index=[Timestamp('20130101'),
                          Timestamp('20130103'),
                          Timestamp('20130102')])
        # non-monotonic
        assert not s.index.is_monotonic
        with pytest.raises(ValueError):
            s.asof(s.index[0])
        # subset with Series
        N = 10
        rng = date_range('1/1/1990', periods=N, freq='53s')
        s = Series(np.random.randn(N), index=rng)
        # `subset` is only meaningful for DataFrame.asof.
        with pytest.raises(ValueError):
            s.asof(s.index[0], subset='foo')

    def test_all_nans(self):
        # GH 15713
        # series is all nans
        result = Series([np.nan]).asof([0])
        expected = Series([np.nan])
        tm.assert_series_equal(result, expected)
        # testing non-default indexes
        N = 50
        rng = date_range('1/1/1990', periods=N, freq='53s')
        dates = date_range('1/1/1990', periods=N * 3, freq='25s')
        result = Series(np.nan, index=rng).asof(dates)
        expected = Series(np.nan, index=dates)
        tm.assert_series_equal(result, expected)
        # testing scalar input
        date = date_range('1/1/1990', periods=N * 3, freq='25s')[0]
        result = Series(np.nan, index=rng).asof(date)
        assert isna(result)
        # test name is propagated
        result = Series(np.nan, index=[1, 2, 3, 4], name='test').asof([4, 5])
        expected = Series(np.nan, index=[4, 5], name='test')
        tm.assert_series_equal(result, expected)
| apache-2.0 |
jkarnows/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Block-structured affinity matrix: two clearly separated groups
    # (samples 0-2 and 3-6), weakly connected through sample 3.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    # Every combination of eigen solver / label assignment / matrix format
    # must recover the same two clusters.
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                if labels[0] == 0:
                    labels = 1 - labels  # normalize the label permutation
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # The fitted model must survive a pickle round-trip intact.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # pyamg is an optional dependency; the test adapts to its availability.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # Without pyamg installed, requesting the amg solver must raise.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """An unrecognized ``eigen_solver`` must make spectral_clustering raise."""
    blob_centers = np.array([[0., 0., 0.],
                             [10., 10., 10.],
                             [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=blob_centers,
                                cluster_std=1., random_state=42)
    # Turn pairwise distances into similarities, then sparsify.
    distances = pairwise_distances(X)
    affinity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """An unrecognized ``assign_labels`` must make spectral_clustering raise."""
    blob_centers = np.array([[0., 0., 0.],
                             [10., 10., 10.],
                             [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=blob_centers,
                                cluster_std=1., random_state=42)
    # Turn pairwise distances into similarities, then sparsify.
    distances = pairwise_distances(X)
    affinity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """Clustering from a sparse precomputed affinity recovers the two blobs."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    # RBF affinities, thresholded so tiny entries become exact zeros.
    dense_affinity = np.maximum(rbf_kernel(X, gamma=1) - 1e-4, 0)
    sparse_affinity = sparse.coo_matrix(dense_affinity)
    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed')
    model.fit(sparse_affinity)
    assert_equal(adjusted_rand_score(y, model.labels_), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # The kNN graph of this dataset is disconnected -> a warning is expected,
    # but clustering must still be perfect.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    # Default (rbf) affinity with an explicit gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    # Smoke-test every named kernel on random data (shape check only).
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A callable affinity is also accepted.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """Check ``discretize`` on noisy near-indicator assignment matrices.

    For various sample and class counts, build a one-hot matrix of random
    labels, perturb it with Gaussian noise, and verify that ``discretize``
    recovers a labeling close to the original (adjusted Rand index > 0.8).
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # Random class labels in [0, n_class] inclusive.
            # NOTE: randint(0, n_class + 1) replaces the deprecated
            # random_integers(0, n_class) and draws identical values from
            # the same RNG stream (random_integers is randint with high + 1).
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # np.float was removed in NumPy 1.24; the builtin is equivalent.
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
manifoldai/merf | merf/merf.py | 1 | 15249 | """
Mixed Effects Random Forest model.
"""
import logging
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import NotFittedError
logger = logging.getLogger(__name__)
class MERF(object):
    """
    This is the core class to instantiate, train, and predict using a mixed effects random forest model.
    It roughly adheres to the sklearn estimator API.
    Note that the user must pass in an already instantiated fixed_effects_model that adheres to the
    sklearn regression estimator API, i.e. must have a fit() and predict() method defined.

    It assumes a data model of the form:

    .. math::

        y = f(X) + b_i Z + e

    * y is the target variable. The current code only supports regression for now, e.g. continuously varying scalar value
    * X is the fixed effect features. Assume p dimensional
    * f(.) is the nonlinear fixed effects mode, e.g. random forest
    * Z is the random effect features. Assume q dimensional.
    * e is iid noise ~N(0, sigma_e²)
    * i is the cluster index. Assume k clusters in the training.
    * bi is the random effect coefficients. They are different per cluster i but are assumed to be drawn from the same distribution ~N(0, Sigma_b) where Sigma_b is learned from the data.

    Args:
        fixed_effects_model (sklearn.base.RegressorMixin): instantiated model class. If None (the
            default), a fresh ``RandomForestRegressor(n_estimators=300, n_jobs=-1)`` is created
            for this instance.
        gll_early_stop_threshold (float): early stopping threshold on GLL improvement
        max_iterations (int): maximum number of EM iterations to train
    """

    def __init__(
        self,
        fixed_effects_model=None,
        gll_early_stop_threshold=None,
        max_iterations=20,
    ):
        self.gll_early_stop_threshold = gll_early_stop_threshold
        self.max_iterations = max_iterations
        self.cluster_counts = None
        # BUGFIX: the default model used to be created in the signature
        # (fixed_effects_model=RandomForestRegressor(...)). Default arguments
        # are evaluated once, so every MERF constructed with the default
        # shared -- and mutually clobbered -- a single regressor instance.
        # Creating the default here gives each MERF its own model.
        if fixed_effects_model is None:
            fixed_effects_model = RandomForestRegressor(n_estimators=300, n_jobs=-1)
        # Note fixed_effects_model must already be instantiated when passed in.
        self.fe_model = fixed_effects_model
        self.trained_fe_model = None
        self.trained_b = None

        # Per-iteration EM diagnostics, appended during fit().
        self.b_hat_history = []
        self.sigma2_hat_history = []
        self.D_hat_history = []
        self.gll_history = []
        self.val_loss_history = []

    def predict(self, X: np.ndarray, Z: np.ndarray, clusters: pd.Series):
        """
        Predict using trained MERF. For known clusters the trained random effect correction is applied.
        For unknown clusters the pure fixed effect (RF) estimate is used.

        Args:
            X (np.ndarray): fixed effect covariates
            Z (np.ndarray): random effect covariates
            clusters (pd.Series): cluster assignments for samples

        Returns:
            np.ndarray: the predictions y_hat
        """
        if type(clusters) != pd.Series:
            raise TypeError("clusters must be a pandas Series.")

        if self.trained_fe_model is None:
            raise NotFittedError(
                "This MERF instance is not fitted yet. Call 'fit' with appropriate arguments before "
                "using this method"
            )

        Z = np.array(Z)  # cast Z to numpy array (required if it's a dataframe, otw, the matrix mults later fail)

        # Apply fixed effects model to all
        y_hat = self.trained_fe_model.predict(X)

        # Apply random effects correction to all known clusters. Note that then, by default, the new clusters get no
        # random effects correction -- which is the desired behavior.
        for cluster_id in self.cluster_counts.index:
            indices_i = clusters == cluster_id

            # If cluster doesn't exist in test data that's ok. Just move on.
            # BUGFIX: indices_i is a boolean mask over *all* samples, so
            # len(indices_i) is always n_samples and the old
            # `len(indices_i) == 0` check could never fire. Test membership
            # with any() instead.
            if not indices_i.any():
                continue

            # If cluster does exist, apply the correction.
            b_i = self.trained_b.loc[cluster_id]
            Z_i = Z[indices_i]
            y_hat[indices_i] += Z_i.dot(b_i)

        return y_hat

    def fit(
        self,
        X: np.ndarray,
        Z: np.ndarray,
        clusters: pd.Series,
        y: np.ndarray,
        X_val: np.ndarray = None,
        Z_val: np.ndarray = None,
        clusters_val: pd.Series = None,
        y_val: np.ndarray = None,
    ):
        """
        Fit MERF using Expectation-Maximization algorithm.

        Args:
            X (np.ndarray): fixed effect covariates
            Z (np.ndarray): random effect covariates
            clusters (pd.Series): cluster assignments for samples
            y (np.ndarray): response/target variable
            X_val (np.ndarray): optional validation fixed effect covariates
            Z_val (np.ndarray): optional validation random effect covariates
            clusters_val (pd.Series): optional validation cluster assignments
            y_val (np.ndarray): optional validation response/target variable

        Returns:
            MERF: fitted model
        """
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Input Checks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if type(clusters) != pd.Series:
            raise TypeError("clusters must be a pandas Series.")

        assert len(Z) == len(X)
        assert len(y) == len(X)
        assert len(clusters) == len(X)

        # Validation data must be provided completely or not at all.
        if X_val is None:
            assert Z_val is None
            assert clusters_val is None
            assert y_val is None
        else:
            assert len(Z_val) == len(X_val)
            assert len(clusters_val) == len(X_val)
            assert len(y_val) == len(X_val)

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Initialization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        n_clusters = clusters.nunique()
        n_obs = len(y)
        q = Z.shape[1]  # random effects dimension
        Z = np.array(Z)  # cast Z to numpy array (required if it's a dataframe, otw, the matrix mults later fail)

        # Create a series where cluster_id is the index and n_i is the value
        self.cluster_counts = clusters.value_counts()

        # Do expensive slicing operations only once
        Z_by_cluster = {}
        y_by_cluster = {}
        n_by_cluster = {}
        I_by_cluster = {}
        indices_by_cluster = {}

        # TODO: Can these be replaced with groupbys? Groupbys are less understandable than brute force.
        for cluster_id in self.cluster_counts.index:
            # Find the index for all the samples from this cluster in the large vector
            indices_i = clusters == cluster_id
            indices_by_cluster[cluster_id] = indices_i

            # Slice those samples from Z and y
            Z_by_cluster[cluster_id] = Z[indices_i]
            y_by_cluster[cluster_id] = y[indices_i]

            # Get the counts for each cluster and create the appropriately sized identity matrix for later computations
            n_by_cluster[cluster_id] = self.cluster_counts[cluster_id]
            I_by_cluster[cluster_id] = np.eye(self.cluster_counts[cluster_id])

        # Intialize for EM algorithm
        iteration = 0
        # Note we are using a dataframe to hold the b_hat because this is easier to index into by cluster_id
        # Before we were using a simple numpy array -- but we were indexing into that wrong because the cluster_ids
        # are not necessarily in order.
        b_hat_df = pd.DataFrame(np.zeros((n_clusters, q)), index=self.cluster_counts.index)
        sigma2_hat = 1
        D_hat = np.eye(q)

        # vectors to hold history
        self.b_hat_history.append(b_hat_df)
        self.sigma2_hat_history.append(sigma2_hat)
        self.D_hat_history.append(D_hat)

        early_stop_flag = False

        while iteration < self.max_iterations and not early_stop_flag:
            iteration += 1
            logger.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            logger.debug("Iteration: {}".format(iteration))
            logger.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ E-step ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # fill up y_star for all clusters
            y_star = np.zeros(len(y))
            for cluster_id in self.cluster_counts.index:
                # Get cached cluster slices
                y_i = y_by_cluster[cluster_id]
                Z_i = Z_by_cluster[cluster_id]
                b_hat_i = b_hat_df.loc[cluster_id]  # used to be ix
                logger.debug("E-step, cluster {}, b_hat = {}".format(cluster_id, b_hat_i))
                indices_i = indices_by_cluster[cluster_id]

                # Compute y_star for this cluster and put back in right place
                y_star_i = y_i - Z_i.dot(b_hat_i)
                y_star[indices_i] = y_star_i

            # check that still one dimensional
            # TODO: Other checks we want to do?
            assert len(y_star.shape) == 1

            # Do the fixed effects regression with all the fixed effects features
            self.fe_model.fit(X, y_star)
            f_hat = self.fe_model.predict(X)

            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ M-step ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            sigma2_hat_sum = 0
            D_hat_sum = 0

            for cluster_id in self.cluster_counts.index:
                # Get cached cluster slices
                indices_i = indices_by_cluster[cluster_id]
                y_i = y_by_cluster[cluster_id]
                Z_i = Z_by_cluster[cluster_id]
                n_i = n_by_cluster[cluster_id]
                I_i = I_by_cluster[cluster_id]

                # index into f_hat
                f_hat_i = f_hat[indices_i]

                # Compute V_hat_i
                V_hat_i = Z_i.dot(D_hat).dot(Z_i.T) + sigma2_hat * I_i

                # Compute b_hat_i
                V_hat_inv_i = np.linalg.pinv(V_hat_i)
                logger.debug("M-step, pre-update, cluster {}, b_hat = {}".format(cluster_id, b_hat_df.loc[cluster_id]))
                b_hat_i = D_hat.dot(Z_i.T).dot(V_hat_inv_i).dot(y_i - f_hat_i)
                logger.debug("M-step, post-update, cluster {}, b_hat = {}".format(cluster_id, b_hat_i))

                # Compute the total error for this cluster
                eps_hat_i = y_i - f_hat_i - Z_i.dot(b_hat_i)

                logger.debug("------------------------------------------")
                logger.debug("M-step, cluster {}".format(cluster_id))
                logger.debug("error squared for cluster = {}".format(eps_hat_i.T.dot(eps_hat_i)))

                # Store b_hat for cluster both in numpy array and in dataframe
                # Note this HAS to be assigned with loc, otw whole df get erroneously assigned and things go to hell
                b_hat_df.loc[cluster_id, :] = b_hat_i
                logger.debug(
                    "M-step, post-update, recalled from db, cluster {}, "
                    "b_hat = {}".format(cluster_id, b_hat_df.loc[cluster_id])
                )

                # Update the sums for sigma2_hat and D_hat. We will update after the entire loop over clusters
                sigma2_hat_sum += eps_hat_i.T.dot(eps_hat_i) + sigma2_hat * (n_i - sigma2_hat * np.trace(V_hat_inv_i))
                D_hat_sum += np.outer(b_hat_i, b_hat_i) + (
                    D_hat - D_hat.dot(Z_i.T).dot(V_hat_inv_i).dot(Z_i).dot(D_hat)
                )  # noqa: E127

            # Normalize the sums to get sigma2_hat and D_hat
            sigma2_hat = (1.0 / n_obs) * sigma2_hat_sum
            D_hat = (1.0 / n_clusters) * D_hat_sum

            logger.debug("b_hat = {}".format(b_hat_df))
            logger.debug("sigma2_hat = {}".format(sigma2_hat))
            logger.debug("D_hat = {}".format(D_hat))

            # Store off history so that we can see the evolution of the EM algorithm
            self.b_hat_history.append(b_hat_df.copy())
            self.sigma2_hat_history.append(sigma2_hat)
            self.D_hat_history.append(D_hat)

            # Generalized Log Likelihood computation to check convergence
            gll = 0
            for cluster_id in self.cluster_counts.index:
                # Get cached cluster slices
                indices_i = indices_by_cluster[cluster_id]
                y_i = y_by_cluster[cluster_id]
                Z_i = Z_by_cluster[cluster_id]
                I_i = I_by_cluster[cluster_id]

                # Slice f_hat and get b_hat
                f_hat_i = f_hat[indices_i]
                R_hat_i = sigma2_hat * I_i
                b_hat_i = b_hat_df.loc[cluster_id]

                # Numerically stable way of computing log(det(A))
                _, logdet_D_hat = np.linalg.slogdet(D_hat)
                _, logdet_R_hat_i = np.linalg.slogdet(R_hat_i)

                gll += (
                    (y_i - f_hat_i - Z_i.dot(b_hat_i))
                    .T.dot(np.linalg.pinv(R_hat_i))
                    .dot(y_i - f_hat_i - Z_i.dot(b_hat_i))
                    + b_hat_i.T.dot(np.linalg.pinv(D_hat)).dot(b_hat_i)
                    + logdet_D_hat
                    + logdet_R_hat_i
                )  # noqa: E127

            logger.info("Training GLL is {} at iteration {}.".format(gll, iteration))
            self.gll_history.append(gll)

            # Save off the most updated fixed effects model and random effects coefficents
            self.trained_fe_model = self.fe_model
            self.trained_b = b_hat_df

            # Early Stopping. This code is entered only if the early stop threshold is specified and
            # if the gll_history array is longer than 1 element, e.g. we are past the first iteration.
            if self.gll_early_stop_threshold is not None and len(self.gll_history) > 1:
                curr_threshold = np.abs((gll - self.gll_history[-2]) / self.gll_history[-2])
                logger.debug("stop threshold = {}".format(curr_threshold))

                if curr_threshold < self.gll_early_stop_threshold:
                    logger.info("Gll {} less than threshold {}, stopping early ...".format(gll, curr_threshold))
                    early_stop_flag = True

            # Compute Validation Loss
            if X_val is not None:
                yhat_val = self.predict(X_val, Z_val, clusters_val)
                val_loss = np.square(np.subtract(y_val, yhat_val)).mean()
                logger.info(f"Validation MSE Loss is {val_loss} at iteration {iteration}.")
                self.val_loss_history.append(val_loss)

        return self

    def score(self, X, Z, clusters, y):
        # Scoring is not implemented for MERF yet.
        raise NotImplementedError()

    def get_bhat_history_df(self):
        """
        This function does a complicated reshape and re-indexing operation to get the
        list of dataframes for the b_hat_history into a multi-indexed dataframe. This
        dataframe is easier to work with in plotting utilities and other downstream
        analyses than the list of dataframes b_hat_history.

        Returns:
            pd.DataFrame: multi-index dataframe with outer index as iteration, inner index as cluster
        """
        # Step 1 - vertical stack all the arrays at each iteration into a single numpy array
        b_array = np.vstack(self.b_hat_history)

        # Step 2 - Create the multi-index. Note the outer index is iteration. The inner index is cluster.
        iterations = range(len(self.b_hat_history))
        clusters = self.b_hat_history[0].index
        mi = pd.MultiIndex.from_product([iterations, clusters], names=("iteration", "cluster"))

        # Step 3 - Create the multi-indexed dataframe
        b_hat_history_df = pd.DataFrame(b_array, index=mi)
        return b_hat_history_df
| mit |
heli522/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
# Shared fixture for all tests below: the diabetes regression dataset
# (442 samples, 10 features).
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# TODO: use another dataset that has multiple drops
def test_simple():
    # Principle of Lars is to keep covariances tied and decreasing
    # also test verbose output
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    try:
        # Capture stdout so verbose=10 output does not pollute the test log.
        sys.stdout = StringIO()

        alphas_, active, coef_path_ = linear_model.lars_path(
            diabetes.data, diabetes.target, method="lar", verbose=10)

        sys.stdout = old_stdout

        for (i, coef_) in enumerate(coef_path_.T):
            res = y - np.dot(X, coef_)
            cov = np.dot(X.T, res)
            C = np.max(abs(cov))
            eps = 1e-3
            # Count how many covariances are tied at the maximum.
            ocur = len(cov[C - eps < abs(cov)])
            if i < X.shape[1]:
                # One feature joins the active set per step.
                assert_true(ocur == i + 1)
            else:
                # no more than max_pred variables can go into the active set
                assert_true(ocur == X.shape[1])
    finally:
        # Always restore stdout, even if the assertions above fail.
        sys.stdout = old_stdout
def test_simple_precomputed():
    """lars_path with a precomputed Gram matrix keeps covariances tied."""
    G = np.dot(diabetes.data.T, diabetes.data)

    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, Gram=G, method="lar")

    eps = 1e-3
    n_features = X.shape[1]
    for step, coef_ in enumerate(coef_path_.T):
        residual = y - np.dot(X, coef_)
        abs_cov = abs(np.dot(X.T, residual))
        C = np.max(abs_cov)
        # Number of covariances tied at the maximum after this step.
        n_tied = len(abs_cov[C - eps < abs_cov])
        # One feature enters per step, capped at the number of features.
        expected = step + 1 if step < n_features else n_features
        assert_true(n_tied == expected)
def test_all_precomputed():
    """Precomputed Gram and Xy must reproduce the from-scratch path exactly."""
    X, y = diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    for method in ('lar', 'lasso'):
        plain_outputs = linear_model.lars_path(X, y, method=method)
        precomputed_outputs = linear_model.lars_path(X, y, Gram=G, Xy=Xy,
                                                     method=method)
        # Compare alphas, active set, and coefficient path element-wise.
        for expected, got in zip(plain_outputs, precomputed_outputs):
            assert_array_almost_equal(expected, got)
def test_lars_lstsq():
    """With alpha=0, LassoLars ends at the ordinary least-squares solution."""
    X_unnormalized = 3 * diabetes.data  # use un-normalized dataset
    lars = linear_model.LassoLars(alpha=0.)
    lars.fit(X_unnormalized, y)
    coef_lstsq = np.linalg.lstsq(X_unnormalized, y)[0]
    assert_array_almost_equal(lars.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
    """The end of the Lars-Lasso path is the least-squares solution."""
    _, _, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    # The final column of the path corresponds to alpha -> 0.
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
    # Check that lars_path is robust to collinearity in input
    X = np.array([[3., 3., 1.],
                  [2., 2., 0.],
                  [1., 1., 0]])
    y = np.array([1., 0., 0])
    f = ignore_warnings
    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
    assert_true(not np.isnan(coef_path_).any())
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert_less((residual ** 2).sum(), 1.)  # just make sure it's bounded

    # Degenerate target (all zeros): the path must be exactly zero, even
    # without copying X or the Gram matrix.
    n_samples = 10
    X = np.random.rand(n_samples, 5)
    y = np.zeros(n_samples)
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
                                              copy_Gram=False, alpha_min=0.,
                                              method='lasso', verbose=0,
                                              max_iter=500)
    assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
    """``return_path=False`` returns exactly the last point of the full path."""
    full_alphas, _, full_coef_path = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar")
    last_alpha, _, last_coef = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", return_path=False)

    assert_array_almost_equal(last_coef, full_coef_path[:, -1])
    assert_true(last_alpha == full_alphas[-1])
def test_no_path_precomputed():
    """``return_path=False`` stays consistent when a Gram matrix is supplied."""
    G = np.dot(diabetes.data.T, diabetes.data)

    full_alphas, _, full_coef_path = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", Gram=G)
    last_alpha, _, last_coef = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", Gram=G,
        return_path=False)

    assert_array_almost_equal(last_coef, full_coef_path[:, -1])
    assert_true(last_alpha == full_alphas[-1])
def test_no_path_all_precomputed():
    """``return_path=False`` stays consistent with Gram and Xy precomputed.

    The single (alpha, coef) returned without the path must equal the last
    point of the full path computed with identical inputs.
    """
    X, y = 3 * diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    alphas_, active_, coef_path_ = linear_model.lars_path(
        X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
    # (A leftover debugging print("---") was removed here.)
    alpha_, active, coef = linear_model.lars_path(
        X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)

    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
    """lars_path must cope with a singular design matrix."""
    X_singular = np.array([[1, 1.], [1., 1.]])
    y_singular = np.array([1, 1])
    _, _, coef_path = linear_model.lars_path(X_singular, y_singular)
    assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])


def test_rank_deficient_design():
    """LARS Lasso must handle rank-deficient designs (n_features < rank)
    the same way coordinate-descent Lasso does."""
    y = [5, 0, 5]
    designs = (
        [[5, 0],
         [0, 5],
         [10, 10]],
        [[10, 10, 0],
         [1e-32, 0, 0],
         [0, 0, 1]],
    )
    for X in designs:
        # Normalization is turned off so the raw coefficients can be
        # plugged straight into the Lasso objective below.
        lars_est = linear_model.LassoLars(.1, normalize=False)
        w_lars = lars_est.fit(X, y).coef_
        obj_lars = ((1. / (2. * 3.))
                    * linalg.norm(y - np.dot(X, w_lars)) ** 2
                    + .1 * linalg.norm(w_lars, 1))
        cd_est = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        w_cd = cd_est.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, w_cd)) ** 2
                  + .1 * linalg.norm(w_cd, 1))
        assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
    """LassoLars and coordinate-descent Lasso must give the same results."""
    def check_path(X, cd_model):
        # Walk the LARS path and refit the CD model at every alpha.
        alphas, _, path = linear_model.lars_path(X, y, method='lasso')
        for coefs, alpha in zip(path.T, alphas):
            if alpha == 0:
                continue
            cd_model.alpha = alpha
            cd_model.fit(X, y)
            assert_less(linalg.norm(coefs - cd_model.coef_), 0.01)

    # raw (not normalized) data
    X = 3 * diabetes.data
    check_path(X, linear_model.Lasso(fit_intercept=False, tol=1e-8))
    # similar test, with the estimator classes
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        assert_less(linalg.norm(clf1.coef_ - clf2.coef_), 1e-3)
    # same test, with normalized data
    X = diabetes.data
    check_path(X, linear_model.Lasso(fit_intercept=False, normalize=True,
                                     tol=1e-8))
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    """LassoLars and coordinate-descent Lasso must agree when the path is
    truncated early via ``alpha_min``.

    The stopping point is tested before, in the middle of, and near the
    end of the path.
    """
    alphas_min = [10, 0.9, 1e-4]
    # BUG FIX: the loop variable used to shadow ``alphas_min`` and the call
    # hard-coded ``alpha_min=0.9``, so only one stopping point was tested.
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
def test_lasso_lars_path_length():
    """The LassoLars path must have the right length and decreasing alphas."""
    lasso = linear_model.LassoLars()
    lasso.fit(X, y)
    truncated = linear_model.LassoLars(alpha=lasso.alphas_[2])
    truncated.fit(X, y)
    assert_array_almost_equal(lasso.alphas_[:3], truncated.alphas_)
    # the sequence of alphas must be strictly decreasing
    assert_true(np.all(np.diff(lasso.alphas_) < 0))


def test_lasso_lars_vs_lasso_cd_ill_conditioned():
    """On a very ill-conditioned design, lars_path must not blow up and must
    stay close to the coordinate-descent solution.

    Also checks that lasso_path, fed the lars_path alphas, reproduces the
    lars_path coefficients under these conditions.
    """
    rng = np.random.RandomState(42)
    # generate a sparse-ground-truth regression problem
    n_samples, n_features, n_informative = 70, 100, 5
    X = rng.randn(n_samples, n_features)
    w = np.zeros((n_features, 1))
    order = np.arange(0, n_features)
    rng.shuffle(order)
    support = order[:n_informative]
    w[support] = np.sign(rng.randn(n_informative, 1)) \
        * (rng.rand(n_informative, 1) + 1)
    y = np.dot(X, w)
    sigma = 0.2
    y += sigma * rng.rand(*y.shape)
    y = y.squeeze()
    lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
    _, lasso_coef2, _ = linear_model.lasso_path(X, y,
                                                alphas=lars_alphas,
                                                tol=1e-6,
                                                fit_intercept=False)
    assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
    """On an extreme design that forces LARS far along the path, LARS and
    coordinate descent must reach comparable objective values.

    Lars used to need the drop-for-good strategy here, but that is no
    longer the case thanks to the equality_tolerance checks.
    """
    X = [[1e20, 1e20, 0],
         [-1e-32, 0, 0],
         [1, 1, 1]]
    y = [10, 10, 1]
    alpha = .0001

    def lasso_objective(w):
        # 1/(2n) * ||y - Xw||^2 + alpha * ||w||_1
        return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, w)) ** 2
                + alpha * linalg.norm(w, 1))

    lars = linear_model.LassoLars(alpha=alpha, normalize=False)
    assert_warns(ConvergenceWarning, lars.fit, X, y)
    lars_obj = lasso_objective(lars.coef_)

    cd = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
    cd_obj = lasso_objective(cd.fit(X, y).coef_)
    assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
    """At least some features must get added when necessary (see 6d2b4c)."""
    # Hilbert matrix: a classically ill-conditioned design
    n = 5
    H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
    clf = linear_model.Lars(fit_intercept=False).fit(H, np.arange(n))
    assert_true(np.all(np.isfinite(clf.coef_)))


def test_lars_n_nonzero_coefs(verbose=False):
    """``n_nonzero_coefs`` must bound both the coefficients and the path."""
    lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    lars.fit(X, y)
    assert_equal(len(lars.coef_.nonzero()[0]), 6)
    # a Lars path going down to 6 non-zero coefs has length 6 + 1
    assert_equal(len(lars.alphas_), 7)


def test_multitarget():
    """Estimators given multidimensional y must act like per-column fits."""
    X = diabetes.data
    Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
    n_targets = Y.shape[1]
    for estimator in (linear_model.LassoLars(), linear_model.Lars()):
        estimator.fit(X, Y)
        Y_pred = estimator.predict(X)
        Y_dec = estimator.decision_function(X)
        assert_array_almost_equal(Y_pred, Y_dec)
        multi_alphas = estimator.alphas_
        multi_active = estimator.active_
        multi_coef = estimator.coef_
        multi_path = estimator.coef_path_
        for target_idx in range(n_targets):
            estimator.fit(X, Y[:, target_idx])
            pred_1d = estimator.predict(X)
            assert_array_almost_equal(multi_alphas[target_idx],
                                      estimator.alphas_)
            assert_array_almost_equal(multi_active[target_idx],
                                      estimator.active_)
            assert_array_almost_equal(multi_coef[target_idx], estimator.coef_)
            assert_array_almost_equal(multi_path[target_idx],
                                      estimator.coef_path_)
            assert_array_almost_equal(Y_pred[:, target_idx], pred_1d)
def test_lars_cv():
    """The optimal alpha of LassoLarsCV must increase as the sample count
    shrinks here.

    This property is not actually guaranteed in general; it is a property
    of this dataset with the given steps chosen.
    """
    previous_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        lars_cv.fit(diabetes.data[:length], diabetes.target[:length])
        np.testing.assert_array_less(previous_alpha, lars_cv.alpha_)
        previous_alpha = lars_cv.alpha_


def test_lasso_lars_ic():
    """LassoLarsIC must select good features, with alpha_bic > alpha_aic
    and fewer non-zero coefficients under BIC than under AIC."""
    rng = np.random.RandomState(42)
    X = diabetes.data
    y = diabetes.target
    X = np.c_[X, rng.randn(X.shape[0], 4)]  # append 4 uninformative features
    lars_bic = linear_model.LassoLarsIC('bic')
    lars_aic = linear_model.LassoLarsIC('aic')
    lars_bic.fit(X, y)
    lars_aic.fit(X, y)
    nonzero_bic = np.where(lars_bic.coef_)[0]
    nonzero_aic = np.where(lars_aic.coef_)[0]
    assert_greater(lars_bic.alpha_, lars_aic.alpha_)
    assert_less(len(nonzero_bic), len(nonzero_aic))
    assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
    # an unknown information criterion must raise ValueError
    lars_broken = linear_model.LassoLarsIC('<unknown>')
    assert_raises(ValueError, lars_broken.fit, X, y)


def test_no_warning_for_zero_mse():
    """LassoLarsIC must not warn when taking the log of a zero MSE."""
    y = np.arange(10, dtype=float)
    X = y.reshape(-1, 1)
    ic_estimator = linear_model.LassoLarsIC(normalize=False)
    assert_no_warnings(ic_estimator.fit, X, y)
    assert_true(np.any(np.isinf(ic_estimator.criterion_)))


def test_lars_path_readonly_data():
    """lars_path must accept read-only fold data (automated memory mapping).

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/4597
    """
    splitted_data = train_test_split(X, y, random_state=42)
    with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
        # the following must not fail despite copy=False
        _lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
    """The ``positive`` flag of lars_path must control coefficient signs.

    With positive=False some coefficients must come out negative; with
    positive=True all must be non-negative — for both 'lar' (the default)
    and 'lasso'. The test runs on the diabetes dataset.
    """
    data, target = diabetes['data'], diabetes['target']
    for method in ['lar', 'lasso']:
        _, _, coefs = linear_model.lars_path(
            data, target, return_path=True, method=method, positive=False)
        assert_true(coefs.min() < 0)
        _, _, coefs = linear_model.lars_path(
            data, target, return_path=True, method=method, positive=True)
        assert_true(coefs.min() >= 0)


# Shared setup for test_estimatorclasses_positive_constraint: the positive
# option is exercised on every estimator class with these parameters.
default_parameter = {'fit_intercept': False}

estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
                           'LassoLars': {'alpha': 0.1},
                           'LarsCV': {},
                           'LassoLarsCV': {},
                           'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    """The ``positive`` option must be honoured by every estimator class.

    Uses the module-level ``default_parameter`` and
    ``estimator_parameter_map`` defined just above.
    """
    for estname in estimator_parameter_map:
        params = default_parameter.copy()
        params.update(estimator_parameter_map[estname])
        estimator = getattr(linear_model, estname)(positive=False, **params)
        estimator.fit(diabetes['data'], diabetes['target'])
        assert_true(estimator.coef_.min() < 0)
        estimator = getattr(linear_model, estname)(positive=True, **params)
        estimator.fit(diabetes['data'], diabetes['target'])
        # CONSISTENCY FIX: use the same ndarray.min() accessor as the
        # negative case above (was the builtin ``min(estimator.coef_)``).
        assert_true(estimator.coef_.min() >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
    """LassoLars and coordinate-descent Lasso must agree with positive=True.

    This mirrors test_lasso_lars_vs_lasso_cd with the positive option
    added. The alpha range in the estimator comparison is restricted
    because the Lars-Lasso algorithm does not converge to the
    least-squares solution for small alphas (see 'Least Angle Regression'
    by Efron et al 2004): the coefficients agree down to the smallest
    alpha reached by Lars-Lasso and start to diverge thereafter. See
    https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
    """
    def check_path(X, cd_model, skip_last=False):
        alphas, _, path = linear_model.lars_path(X, y, method='lasso',
                                                 positive=True)
        if skip_last:
            pairs = zip(path.T[:-1], alphas[:-1])  # exclude alpha == 0
        else:
            pairs = zip(path.T, alphas)
        for coefs, alpha in pairs:
            if alpha == 0:
                continue
            cd_model.alpha = alpha
            cd_model.fit(X, y)
            assert_less(linalg.norm(coefs - cd_model.coef_), 0.01)

    # not normalized data
    X = 3 * diabetes.data
    check_path(X, linear_model.Lasso(fit_intercept=False, tol=1e-8,
                                     positive=True))
    for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
                                      normalize=False, positive=True).fit(X, y)
        clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
                                  normalize=False, positive=True).fit(X, y)
        assert_less(linalg.norm(clf1.coef_ - clf2.coef_), 1e-3)
    # normalized data
    X = diabetes.data
    check_path(X, linear_model.Lasso(fit_intercept=False, normalize=True,
                                     tol=1e-8, positive=True),
               skip_last=True)
| bsd-3-clause |
etkirsch/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)

# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
#         Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause

import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl

# fixed seed so the example is reproducible from run to run
np.random.seed(1)
def f(x):
    """Target function to learn: f(x) = x * sin(x)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T

# Observations
y = f(X).ravel()

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE (1.96 sigma around the prediction)
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T

# Observations and noise (per-point heteroscedastic noise scale dy)
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model; the nugget is the normalized
# variance (dy / y) ** 2, as explained in the module docstring
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

pl.show()
| bsd-3-clause |
wesm/statsmodels | scikits/statsmodels/sandbox/examples/thirdparty/ex_ratereturn.py | 1 | 4385 | # -*- coding: utf-8 -*-
"""Playing with correlation of DJ-30 stock returns
this uses pickled data that needs to be created with findow.py
to see graphs, uncomment plt.show()
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
import pickle
import scikits.statsmodels.api as sm
import scikits.statsmodels.sandbox as sb
import scikits.statsmodels.sandbox.tools as sbtools
from scikits.statsmodels.graphics.correlation import plot_corr, plot_corr_grid
# NOTE(review): this module is Python 2 code (print statements, ``file()``).
# load the pickled DJ-30 daily-returns DataFrame created by findow.py
try:
    rrdm = pickle.load(file('dj30rr','rb'))
except Exception: #blanket for any unpickling error
    print "Error with unpickling, a new pickle file can be created with findow_1"
    raise

ticksym = rrdm.columns.tolist()
rr = rrdm.values[1:400]

# raw correlation matrix of the daily returns
rrcorr = np.corrcoef(rr, rowvar=0)

plot_corr(rrcorr, xnames=ticksym)
nvars = rrcorr.shape[0]
plt.figure()
# histogram of the upper-triangle (pairwise) correlation coefficients
plt.hist(rrcorr[np.triu_indices(nvars,1)])
plt.title('Correlation Coefficients')

# PCA via SVD; print the cumulative fraction of explained variance
xreda, facta, evaa, evea = sbtools.pcasvd(rr)
evallcs = (evaa).cumsum()
print evallcs/evallcs[-1]

# keep only the first 4 principal components and compare correlations
xred, fact, eva, eve = sbtools.pcasvd(rr, keepdim=4)
pcacorr = np.corrcoef(xred, rowvar=0)

plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA')

# correlation of the residuals after removing the PCA approximation
resid = rr-xred
residcorr = np.corrcoef(resid, rowvar=0)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals')

plt.matshow(residcorr)
plt.imshow(residcorr, cmap=plt.cm.jet, interpolation='nearest',
           extent=(0,30,0,30), vmin=-1.0, vmax=1.0)
plt.colorbar()

# 2x2 grid of the three correlation plots sharing one colorbar
normcolor = (0,1) #False #True
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
plot_corr(rrcorr, xnames=ticksym, normcolor=normcolor, ax=ax)
ax2 = fig.add_subplot(2,2,3)
#pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA',
          normcolor=normcolor, ax=ax2)
ax3 = fig.add_subplot(2,2,4)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals',
          normcolor=normcolor, ax=ax3)

import matplotlib as mpl
# collect the AxesImage artists so one shared colorbar can be attached
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
print images
print ax.get_children()
#cax = fig.add_subplot(2,2,2)
#[0.85, 0.1, 0.075, 0.8]
fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
fig.savefig('corrmatrixgrid.png', dpi=120)

# optional dependency: the robust/shrinkage covariance estimators below
# need scikit-learn
has_sklearn = True
try:
    import sklearn
except ImportError:
    has_sklearn = False
    print 'sklearn not available'
def cov2corr(cov):
    """Convert a covariance matrix into the corresponding correlation matrix."""
    stds = np.sqrt(np.diag(cov))
    return cov / np.outer(stds, stds)
# NOTE(review): indentation reconstructed — the whole remainder is assumed
# to sit under the ``has_sklearn`` guard, since the names it defines
# (corr_lw, corr_oas, corr_mcd) are used by the comparison code below;
# verify against the original file.
if has_sklearn:
    from sklearn.covariance import LedoitWolf, OAS, MCD

    # Ledoit-Wolf shrinkage estimate of the covariance
    lw = LedoitWolf(store_precision=False)
    lw.fit(rr, assume_centered=False)
    cov_lw = lw.covariance_
    corr_lw = cov2corr(cov_lw)

    # Oracle Approximating Shrinkage estimate
    oas = OAS(store_precision=False)
    oas.fit(rr, assume_centered=False)
    cov_oas = oas.covariance_
    corr_oas = cov2corr(cov_oas)

    # Minimum Covariance Determinant (robust) estimate
    mcd = MCD()#.fit(rr, reweight=None)
    mcd.fit(rr, assume_centered=False)
    cov_mcd = mcd.covariance_
    corr_mcd = cov2corr(cov_mcd)

    titles = ['raw correlation', 'lw', 'oas', 'mcd']
    normcolor = None
    fig = plt.figure()
    # 2x2 grid comparing the raw and the three shrunk/robust correlations
    for i, c in enumerate([rrcorr, corr_lw, corr_oas, corr_mcd]):
        #for i, c in enumerate([np.cov(rr, rowvar=0), cov_lw, cov_oas, cov_mcd]):
        ax = fig.add_subplot(2,2,i+1)
        plot_corr(c, xnames=None, title=titles[i],
                  normcolor=normcolor, ax=ax)
    images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
    fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
    cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
    fig.colorbar(images[0], cax=cax)

    # pairwise distances between all the correlation estimates
    corrli = [rrcorr, corr_lw, corr_oas, corr_mcd, pcacorr]
    diffssq = np.array([[((ci-cj)**2).sum() for ci in corrli]
                        for cj in corrli])
    diffsabs = np.array([[np.max(np.abs(ci-cj)) for ci in corrli]
                         for cj in corrli])
    print diffssq
    print '\nmaxabs'
    print diffsabs
    fig.savefig('corrmatrix_sklearn.png', dpi=120)

    fig2 = plot_corr_grid(corrli+[residcorr], ncols=3,
                          titles=titles+['pca', 'pca-residual'],
                          xnames=[], ynames=[])
    fig2.savefig('corrmatrix_sklearn_2.png', dpi=120)

#plt.show()
#plt.close('all')
| bsd-3-clause |
johnmgregoire/PythonCompositionPlots | quaternary_FOM_stackedtern5.py | 1 | 2984 | import matplotlib.cm as cm
import numpy
import pylab
import operator, copy, os
#pylab.rc('font',**{'family':'serif''serif':['Times New Roman']})
#pylab.rcParams['font.family']='serif'
#pylab.rcParams['font.serif']='Times New Roman'
pylab.rc('font', family='serif', serif='Times New Roman')
#os.chdir('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
def make5ternaxes(ellabels=['A', 'B', 'C', 'D'], fig=None):
    """Lay out 5 stacked ternary axes that slice a quaternary composition.

    Each ternary shows the first three elements for a 0.2-wide band of the
    fourth element's fraction. Returns ``(axl, stpl)``: the matplotlib axes
    and the matching TernaryPlot helpers.

    NOTE(review): ``ellabels`` is a mutable default argument; it is only
    read here, so this is benign, but do not mutate it.
    """
    if fig is None:
        fig=pylab.figure(figsize=(8, 6))
    # hand-tuned x-offsets (cumulative) and y-centers for the 5 axes
    ax_xc=[]
    ax_yc=[]
    xcdel=[.22, .23, .13, .09, .06]
    ax_yc=[.5, .58, .36, .58, .42]
    for i in range(5):
        if i==0:
            ax_xc+=[xcdel[i]]
        else:
            ax_xc+=[ax_xc[-1]+xcdel[i]]
        #ax_yc+=[.5+((i%2)*2.-1.)*((i>0)*.1+.072*i/10)]
    # each successive axes is scaled down by 1/i (i = 1..5); zip stops at 5
    shape1=numpy.array([.35, 1.])
    axl=[]
    for i, xc, yc in zip(range(1, 11), ax_xc, ax_yc):
        w, l=shape1/i
        axl+=[fig.add_axes([xc-w/2, yc-l/2, w, l])]
    # build a TernaryPlot per axes and annotate it with the D-fraction band
    stpl=[]
    xpos=[.27]*10
    xpos[0:3]=[.35, .29, .28]
    xpos[-1]=.26
    for count, (ax, xp) in enumerate(zip(axl, xpos)):
        stp=TernaryPlot(ax, ellabels=ellabels[:3], offset=.03)
        stp.label(fontsize=17)#,fontdict={'fontname':'Times New Roman'})
        stpl+=[stp]
        if count<4:
            stp.ax.text(xp, .8, '%s$_{%.2f-%.2f}$' %(ellabels[3], (count*.2), ((count+1)*.2)-.01), ha='right', va='center', fontsize=17)
        else:
            stp.ax.text(xp, .8, '%s$_{%.2f-%d}$' %(ellabels[3], (count*.2), 1), ha='right', va='center', fontsize=17)
    return axl, stpl
def scatter_5axes(comps, fom, stpl, s=18, cb=False, cbrect=(.85, .3, .04, .4), cblabel='', **kwargs):# for colorbar must pass kwargs norm and cmap and optionally cblabel
    """Scatter quaternary compositions onto the 5 stacked ternary axes.

    ``comps`` is an (n, 4) composition array, ``fom`` the per-point color
    values, ``stpl`` the TernaryPlot list from make5ternaxes. Points are
    bucketed by the 4th component (rounded to 30ths) into 5 bands of 6
    sub-bands each, with marker/size/alpha varying per sub-band.
    """
    # project to the A-B-C face; all-zero rows are replaced by the centroid
    abc=comps[:, :3]
    abc[abc.sum(axis=1)==0.]=numpy.array([1., 1., 1.])/3.
    abc=numpy.array([c/c.sum() for c in abc])
    # discretize the D fraction into 30ths for band assignment
    d=comps[:, 3]
    d30=numpy.round(d*30.)
    dlims=numpy.array([0., 1., 2., 3., 4., 6.])
    # (marker, size factor, alpha) per sub-band within an axes
    marks=[('o', 1., 1.), ('o', 1., .8), ('D', .9, .7), ('D', .9, .6),('s', .8, .5),('s', .8, .4)]
    sl=s*numpy.array([2.3, 1.5, .8, .7, .6])
    scplots=[]
    for i, (stp, sv) in enumerate(zip(stpl, sl)):
        dl=dlims+(i*6.)
        if i==4:
            # widen the very last sub-band so d == 1 is included
            dl[-1]+=1.
        for a, b, (m, sf, al) in zip(dl, dl[1:], marks):
            inds=numpy.where((d30>=a) & (d30<b))[0]
            #print a, b, len(inds)
            if len(inds)>0:
                scplots+=[stp.scatter(abc[inds], c=fom[inds], marker=m, s=sv*sf, alpha=al, **kwargs)]
    # optional shared colorbar; requires 'norm' and 'cmap' in kwargs
    if cb:
        cbax=stp.ax.figure.add_axes(cbrect)
        if 'extend' in kwargs.keys():
            sm=cm.ScalarMappable(norm=kwargs['norm'], cmap=kwargs['cmap'], extend=kwargs['extend'])
        else:
            sm=cm.ScalarMappable(norm=kwargs['norm'], cmap=kwargs['cmap'])
        sm.set_array(fom)
        cb=stp.ax.figure.colorbar(sm, cax=cbax)
        cb.set_label(cblabel, fontsize=18)
| bsd-3-clause |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py | 69 | 2207 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
# NOTE(review): Python 2 code (print statements); kept as-is.
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    if _debug: print 'backend_gtkcairo.%s()' % fn_name()
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass(*args, **kwargs)
    # canvas renders with cairo; the manager is the plain GTK one
    canvas = FigureCanvasGTKCairo(thisFig)
    return FigureManagerGTK(canvas, num)


class RendererGTKCairo (backend_cairo.RendererCairo):
    # Select the set_pixmap implementation at class-creation time based on
    # the PyGTK version: 2.7+ exposes pixmap.cairo_create() directly.
    if gtk.pygtk_version >= (2,7,0):
        def set_pixmap (self, pixmap):
            self.ctx = pixmap.cairo_create()
            self.ctx.save() # restore, save - when call new_gc()
    else:
        def set_pixmap (self, pixmap):
            self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
            self.ctx.save() # restore, save - when call new_gc()


class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
    # merge the file types supported by both parent canvases
    filetypes = FigureCanvasGTK.filetypes.copy()
    filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)

    def _renderer_init(self):
        """Override to use cairo (rather than GDK) renderer"""
        if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
        self._renderer = RendererGTKCairo (self.figure.dpi)


class FigureManagerGTKCairo(FigureManagerGTK):
    def _get_toolbar(self, canvas):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        if matplotlib.rcParams['toolbar']=='classic':
            toolbar = NavigationToolbar (canvas, self.window)
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
        else:
            toolbar = None
        return toolbar


class NavigationToolbar2Cairo(NavigationToolbar2GTK):
    def _get_canvas(self, fig):
        # hand out the cairo-backed canvas instead of the GDK one
        return FigureCanvasGTKCairo(fig)
| agpl-3.0 |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py | 71 | 1463 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
    """Smoke tests for the scipy.spatial plotting helpers."""

    points = [(0, 0), (0, 1), (1, 0), (1, 1)]

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_delaunay(self):
        # Smoke test: plotting must return the figure and must not mutate
        # the triangulation's simplices.
        figure = plt.figure()
        triangulation = Delaunay(self.points)
        simplices_snapshot = triangulation.simplices.copy()
        result = delaunay_plot_2d(triangulation, ax=figure.gca())
        assert_array_equal(triangulation.simplices, simplices_snapshot)
        assert_(result is figure)
        delaunay_plot_2d(triangulation, ax=figure.gca())

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_voronoi(self):
        # Smoke test: both the explicit-axes and default-axes code paths.
        figure = plt.figure()
        diagram = Voronoi(self.points)
        result = voronoi_plot_2d(diagram, ax=figure.gca())
        assert_(result is figure)
        voronoi_plot_2d(diagram)

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_convex_hull(self):
        # Smoke test: both the explicit-axes and default-axes code paths.
        figure = plt.figure()
        hull = ConvexHull(self.points)
        result = convex_hull_plot_2d(hull, ax=figure.gca())
        assert_(result is figure)
        convex_hull_plot_2d(hull)
| mit |
mrshu/scikit-learn | examples/linear_model/plot_logistic.py | 5 | 1389 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print __doc__
# Code source: Gael Varoqueux
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# binary labels: class 1 where X > 0
# NOTE(review): np.float is removed in modern numpy; fine for the numpy
# version this example targets
y = (X > 0).astype(np.float)
# stretch the positive side and add noise so the classes overlap
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)

X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)

# and plot the result
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid: sigma(x) = 1 / (1 + exp(-x))."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# probability of class 1 along the test grid, via the fitted logit
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
pl.plot(X_test, loss, color='blue', linewidth=3)

# ordinary least squares line, for comparison with the logistic curve
ols = linear_model.LinearRegression()
ols.fit(X, y)
pl.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
pl.axhline(.5, color='.5')

pl.ylabel('y')
pl.xlabel('X')
pl.xticks(())
pl.yticks(())
pl.ylim(-.25, 1.25)
pl.xlim(-4, 10)
pl.show()
| bsd-3-clause |
trmznt/fatools | fatools/scripts/facmd.py | 2 | 17741 |
import sys, argparse, yaml, csv, transaction
from fatools.lib.utils import cout, cerr, get_dbhandler, set_verbosity
from fatools.lib import params
from fatools.lib.const import assaystatus, peaktype
from fatools.lib.fautil import algo
def init_argparser(parser=None):
    """Create or extend an argparse parser with all facmd options.

    If *parser* is given it is extended in place; otherwise a fresh
    ``argparse.ArgumentParser`` named 'facmd' is created. Returns the
    parser.
    """
    if parser:
        p = parser
    else:
        p = argparse.ArgumentParser('facmd')

    # database backends (one of the two is expected)
    p.add_argument('--sqldb', default=False,
            help = 'SQLite3 database filename')

    p.add_argument('--fsdb', default=False,
            help = 'directory for filesystem-based database')

    # processing commands; each flag triggers one pipeline step in do_facmd
    p.add_argument('--clear', default=False, action='store_true',
            help = 'clear / remove all peaks from assay')

    p.add_argument('--scan', default=False, action='store_true',
            help = 'scanning assay for peaks')

    p.add_argument('--preannotate', default=False, action='store_true',
            help = 'preannotate assay for overlapping peaks, stutter and broad peaks')

    p.add_argument('--alignladder', default=False, action='store_true',
            help = 'align ladder peaks with standard size')

    p.add_argument('--call', default=False, action='store_true',
            help = 'calling peaks (determining the sizes of peaks)')

    p.add_argument('--bin', default=False, action='store_true',
            help = 'binning peaks')

    p.add_argument('--postannotate', default=False, action='store_true',
            help = 'post annotate peaks')

    # reporting / inspection commands
    p.add_argument('--listpeaks', default=False, action='store_true',
            help = 'list all peaks')

    p.add_argument('--listassay', default=False, action='store_true',
            help = 'list assay information')

    p.add_argument('--showtrace', default=False, action='store_true',
            help = 'show trace as a plot')

    p.add_argument('--findpeaks', default=False, action='store_true',
            help = 'only find peaks')

    p.add_argument('--setallele', default=False, action='store_true',
            help = 'set allele type')

    # selection filters: which batch/sample/assay/marker/panel to process
    p.add_argument('--batch', default=False,
            help = 'batch code')

    p.add_argument('--sample', default=False,
            help = 'sample code')

    p.add_argument('--assay', default=False,
            help = 'assay filename')

    p.add_argument('--marker', default=False,
            help = 'marker code')

    p.add_argument('--panel', default='',
            help = 'panel list (comma separated)')

    # output / execution options
    p.add_argument('--commit', default=False, action='store_true',
            help = 'commit to database')

    p.add_argument('--outfmt', default='text',
            help = 'output format, either text or tab')

    p.add_argument('--outfile', default='-',
            help = 'output filename')

    p.add_argument('--peakcachedb', default=False,
            help = 'peak cache db filename')

    p.add_argument('--method', default='',
            help = 'spesific method or algorithm to use')

    p.add_argument('--value', default='',
            help = 'bin value of alleles')

    p.add_argument('--totype', default='',
            help = 'new type of alleles')

    p.add_argument('--fromtype', default='',
            help = 'original type of alleles')

    # NOTE(review): no help text or default; presumably a peak-exclusion
    # list — confirm intended format against the callers
    p.add_argument('--excluded_peaks')

    p.add_argument('--stutter_ratio', default=0, type=float)

    p.add_argument('--stutter_range', default=0, type=float)

    p.add_argument('--force', default=False, action='store_true',
            help = 'force the method (even if need short-cutting)' )

    p.add_argument('--test', default=False, action='store_true',
            help = 'just testing, not need to commit to database')

    p.add_argument('-y', default=False, action='store_true',
            help = 'say yes to all interactive questions')

    p.add_argument('--abort', default=False, action='store_true',
            help = 'abort for any warning')

    # debugging / diagnostics
    p.add_argument('--showladderpca', default=False, action='store_true',
            help = 'show PCA plot for ladder peaks')

    p.add_argument('--showz', default=False, action='store_true',
            help = 'show Z plot for ladder peaks')

    p.add_argument('--verbose', default=0, type=int,
            help = 'show verbositiy of the processing')

    return p
def main(args):
    """Entry point: run the commands, inside a transaction when committing.

    Without --commit, warns and (unless --test or -y) asks the user to
    confirm before running with all changes discarded.
    """
    if args.commit:
        with transaction.manager:
            do_facmd(args)
        cerr('** COMMIT to database **')
        return

    cerr('WARNING ** running without database COMMIT! All changes will be discarded!')
    if not (args.test or args.y):
        answer = input('Do you want to continue [y/n]? ')
        if not answer.lower().strip().startswith('y'):
            sys.exit(1)
    do_facmd(args)
def do_facmd(args, dbh=None):
    """Dispatch every facmd subcommand whose flag was supplied on args,
    in a fixed pipeline order, counting how many were executed."""
    if dbh is None:
        dbh = get_dbhandler(args)
    if args.verbose != 0:
        set_verbosity(args.verbose)
    # (args attribute, handler) pairs, kept in the original execution order
    dispatch_table = [
        ('clear', do_clear),
        ('findpeaks', do_findpeaks),
        ('scan', do_scan),
        ('preannotate', do_preannotate),
        ('alignladder', do_alignladder),
        ('call', do_call),
        ('bin', do_bin),
        ('postannotate', do_postannotate),
        ('setallele', do_setallele),
        ('showladderpca', do_showladderpca),
        ('listassay', do_listassay),
        ('listpeaks', do_listpeaks),
        ('showtrace', do_showtrace),
        ('showz', do_showz),
    ]
    executed = 0
    for flag_name, handler in dispatch_table:
        # flags default to False; anything else means "run this command"
        if getattr(args, flag_name) is not False:
            handler(args, dbh)
            executed += 1
    if executed == 0:
        cerr('WARN - unknown command, nothing to do!')
    else:
        cerr('INFO - executed %d command(s)' % executed)
def do_clear( args, dbh ):
    """Remove previously detected peaks from every selected assay."""
    cerr('Clearing peaks...')
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('Clearing sample: %s assay %s [%d/%d]' %
                (sample_code, assay.filename, idx, total))
        assay.clear()
def do_scan( args, dbh ):
    """Scan raw traces for peaks in every selected assay, optionally reading
    from a pre-built LevelDB peak cache (--peakcachedb)."""
    cerr('I: Scanning peaks...')
    scanning_parameter = params.Params()
    assay_list = get_assay_list( args, dbh )
    peakdb = None
    if args.peakcachedb:
        import leveldb
        # cache must already exist; scanning never creates it
        peakdb = leveldb.LevelDB(args.peakcachedb, create_if_missing=False)
    if args.method:
        # apply the user-selected algorithm to both ladder and non-ladder channels
        scanning_parameter.ladder.method = args.method
        scanning_parameter.nonladder.method = args.method
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('I: [%d/%d] - Scanning: %s | %s' %
                (idx, total, sample_code, assay.filename))
        assay.scan( scanning_parameter, peakdb = peakdb )
def do_preannotate( args, dbh ):
    """Run pre-annotation over the detected peaks of every selected assay."""
    cerr('I: Preannotating peaks...')
    scanning_parameter = params.Params()
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('I: [%d/%d] - Preannotating: %s | %s' %
                (idx, total, sample_code, assay.filename))
        assay.preannotate( scanning_parameter )
def do_alignladder( args, dbh ):
    # Align each assay's ladder channel against the expected ladder sizes,
    # log per-assay quality metrics, and (with --abort) stop the whole run
    # on anything less than a perfect QC score.
    cerr('Aligning ladders...')
    assay_list = get_assay_list( args, dbh )
    counter = 1
    for (assay, sample_code) in assay_list:
        cerr('I: [%d/%d] - Aligning: %s | %s' %
                (counter, len(assay_list), sample_code, assay.filename))
        (dpscore, rss, no_of_peaks, no_of_ladders, qcscore, remarks,
                method) = assay.alignladder( args.excluded_peaks, force_mode = args.force )
        # prefix low-quality alignments with a warning marker in the log
        if qcscore < 0.9:
            msg = 'W! low ladder QC'
        else:
            msg = 'I:'
        cerr( '%s [%d/%d] - Score %3.2f %4.2f %5.2f %d/%d %s for %s | %s'
            % ( msg, counter, len(assay_list),
                qcscore, dpscore, rss, no_of_peaks, no_of_ladders,
                method, sample_code, assay.filename) )
        if remarks:
            cerr('%s - %s' % (msg, ' | '.join(remarks)))
        # --abort requires a perfect (1.0) QC score to continue
        if qcscore != 1.0 and args.abort:
            sys.exit(1)
        counter += 1
def do_call(args, dbh):
    """Call (size) peaks on every selected assay."""
    cerr('I: Calling peaks...')
    scanning_parameter = params.Params()
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('I: [%d/%d] - Calling: %s | %s' %
                (idx, total, sample_code, assay.filename))
        assay.call( scanning_parameter )
def do_bin(args, dbh):
    """Bin called peaks into allele bins, optionally limited to --marker."""
    cerr('I: Binning peaks...')
    scanning_parameter = params.Params()
    markers = None
    if args.marker:
        markers = [ dbh.get_marker( code ) for code in args.marker.split(',') ]
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('I: [%d/%d] - Binning: %s | %s' %
                (idx, total, sample_code, assay.filename))
        assay.bin( scanning_parameter, markers )
def do_postannotate(args, dbh):
    """Post-annotate binned peaks, with optional marker restriction and
    stutter-threshold overrides (--stutter_ratio / --stutter_range)."""
    cerr('I: Post-annotating peaks...')
    scanning_parameter = params.Params()
    markers = None
    if args.marker:
        markers = [ dbh.get_marker( code ) for code in args.marker.split(',') ]
    # optional overrides of the non-ladder stutter detection thresholds
    if args.stutter_ratio > 0:
        scanning_parameter.nonladder.stutter_ratio = args.stutter_ratio
    if args.stutter_range > 0:
        scanning_parameter.nonladder.stutter_range = args.stutter_range
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    for idx, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('I: [%d/%d] - Post-annotating: %s | %s' %
                (idx, total, sample_code, assay.filename))
        assay.postannotate( scanning_parameter, markers )
def do_findpeaks( args, dbh ):
    """Find peaks in every channel of the selected assays (in parallel) and
    cache the results in a LevelDB database, or dump them to stdout when the
    cache filename is '-'.
    """
    import leveldb
    from fatools.lib import params
    cerr('Finding and caching peaks...')
    if not args.peakcachedb:
        cexit('ERR - please provide cache db filename')
    # opening LevelDB database
    if args.peakcachedb == '-':
        peakdb = None
    else:
        peakdb = leveldb.LevelDB(args.peakcachedb)
    scanning_parameter = params.Params()
    assay_list = get_assay_list( args, dbh )
    if args.method:
        scanning_parameter.ladder.method = args.method
        scanning_parameter.nonladder.method = args.method
    channel_list = []
    counter = 1
    cerr('', nl=False)
    for (assay, sample_code) in assay_list:
        cerr('\rI: [%d/%d] processing assay' % (counter, len(assay_list)), nl=False)
        for c in assay.channels:
            # bug fix: this local used to be named 'params', shadowing the
            # module imported above; renamed to remove the shadowing hazard
            if c.marker.code == 'ladder':
                channel_params = scanning_parameter.ladder
            else:
                channel_params = scanning_parameter.nonladder
            channel_list.append( (c.tag(), c.data, channel_params) )
        counter += 1
    cerr('')
    do_parallel_find_peaks( channel_list, peakdb )
    #peakdb.close()
def do_setallele( args, dbh ):
    """Force the type of alleles matching the given --marker and --value
    (bin) lists to --totype; --fromtype optionally restricts the change to
    alleles currently of that type.
    """
    marker_codes = args.marker.split(',')
    marker_ids = [ dbh.get_marker(code).id for code in marker_codes ]
    bin_values = [ int(x) for x in args.value.split(',') ]
    totype = getattr(peaktype, args.totype)
    assay_list = get_assay_list( args, dbh )
    for (assay, sample_code) in assay_list:
        for c in assay.channels:
            if marker_ids and c.marker_id in marker_ids:
                for allele in c.alleles:
                    if allele.bin not in bin_values:
                        continue
                    # NOTE(review): allele.type is compared against the raw
                    # --fromtype string here, while --totype is resolved via
                    # getattr(peaktype, ...) above — confirm allele.type
                    # compares equal to its string name.
                    if args.fromtype and allele.type != args.fromtype:
                        continue
                    allele.type = totype
                    cerr('I: - setting allele %d marker %s for sample %s' %
                            (allele.bin, c.marker.label, sample_code))
def do_showladderpca( args, dbh ):
    """Display a PCA plot of ladder peaks for each selected assay."""
    assay_list = get_assay_list( args, dbh )
    total = len(assay_list)
    # bug fix: the original 'counter' was never incremented, so the progress
    # message always showed '[1/N]'; enumerate provides the real index
    for counter, (assay, sample_code) in enumerate(assay_list, start=1):
        cerr('Showing ladder PCA for sample: %s assay %s [%d/%d]' %
                (sample_code, assay.filename, counter, total))
        assay.showladderpca()
def do_listassay( args, dbh ):
    """Print a summary for every selected assay to --outfile ('-' = stdout)."""
    assay_list = get_assay_list( args, dbh )
    if args.outfile != '-':
        out_stream = open(args.outfile, 'w')
        close_stream = True
    else:
        out_stream = sys.stdout
        close_stream = False
    try:
        for (assay, sample_code) in assay_list:
            printout_assay( assay, outfile = out_stream, fmt = args.outfmt )
    finally:
        # bug fix: close the file we opened (it previously leaked);
        # never close sys.stdout
        if close_stream:
            out_stream.close()
def do_listpeaks( args, dbh ):
    """Write a TSV of every allele (peak) in the selected assays, optionally
    restricted to --marker, to --outfile ('-' = stdout)."""
    assay_list = get_assay_list( args, dbh )
    if args.marker:
        markers = [ dbh.get_marker( code ) for code in args.marker.split(',') ]
    else:
        markers = None
    if markers:
        cerr('Markers: %s' % ','.join( m.code for m in markers ))
    if args.outfile != '-':
        out_stream = open(args.outfile, 'w')
        close_stream = True
    else:
        out_stream = sys.stdout
        close_stream = False
    try:
        out_stream.write('SAMPLE\tFILENAME\tDYE\tRTIME\tHEIGHT\tSIZE\tSCORE\tID\n')
        for (assay, sample_code) in assay_list:
            cout('Sample: %s assay: %s' % (sample_code, assay.filename))
            for channel in assay.channels:
                if markers and channel.marker not in markers:
                    continue
                cout('Marker => %s | %s [%d]' % (channel.marker.code, channel.dye,
                        len(channel.alleles)))
                for p in channel.alleles:
                    out_stream.write('%s\t%s\t%s\t%d\t%d\t%5.3f\t%3.2f\t%d\n' %
                        (sample_code, assay.filename, channel.dye, p.rtime, p.height, p.size, p.qscore, p.id)
                    )
    finally:
        # bug fix: close the file we opened (it previously leaked);
        # never close sys.stdout
        if close_stream:
            out_stream.close()
def do_showtrace( args, dbh ):
    """Plot the raw trace of every channel plus its detected peak markers,
    one interactive window per assay."""
    assay_list = get_assay_list( args, dbh )
    from matplotlib import pylab as plt
    for (assay, sample_code) in assay_list:
        detected_peaks = []
        for channel in assay.channels:
            plt.plot( channel.raw_data )
            detected_peaks.extend( channel.alleles )
        # overlay every detected peak as a red cross
        for peak in detected_peaks:
            plt.plot( peak.rtime, peak.height, 'r+' )
        plt.show()
def do_showz( args, dbh ):
    # Plot each assay's ladder size-calling polynomial (Z) against the actual
    # ladder peaks, printing the per-peak residuals to stdout.
    assay_list = get_assay_list( args, dbh )
    from matplotlib import pylab as plt
    import numpy as np
    for (assay, sample_code) in assay_list:
        ladder_peaks = list(assay.ladder.alleles)
        z = assay.z
        peak_pairs = [ (x.rtime, x.size) for x in ladder_peaks ]
        # evaluate the polynomial from the first peak to 200 rtime units
        # past the last one
        x = np.linspace( peak_pairs[0][0], peak_pairs[-1][0] + 200, 100 )
        f = np.poly1d(z)
        y = f(x)
        print(' => Z: ', z)
        for p in ladder_peaks:
            # rtime -> predicted size | expected size | absolute residual
            print(' => %6d -> %6.2f | %4d | %5.2f' % ( p.rtime, f(p.rtime), p.size,
                    abs( f(p.rtime) - p.size )))
        plt.plot(x,y)
        # NOTE(review): the comprehension variable 'x' below shadows the
        # linspace array above; harmless after plotting, but confusing.
        rtimes = [ x[0] for x in peak_pairs ]
        sizes = [ x[1] for x in peak_pairs ]
        plt.scatter(rtimes, sizes)
        plt.show()
# helpers
def get_assay_list( args, dbh ):
    """Resolve --batch / --sample / --assay / --panel into a list of
    (assay, sample_code) tuples; empty filters accept everything."""
    if not args.batch:
        cerr('ERR - need --batch argument!')
        sys.exit(1)
    batch = dbh.get_batch( args.batch )
    if not batch:
        cerr('ERR - batch %s not found!' % args.batch)
        sys.exit(1)
    # each filter is a (possibly empty) list of accepted codes/filenames
    samples = args.sample.split(',') if args.sample else []
    assays = args.assay.split(',') if args.assay else []
    panels = args.panel.split(',') if args.panel else []
    assay_list = []
    for sample in batch.samples:
        if samples and sample.code not in samples:
            continue
        for assay in sample.assays:
            if assays and assay.filename not in assays:
                continue
            if panels and assay.panel.code not in panels:
                continue
            assay_list.append( (assay, sample.code) )
    cerr('INFO - number of assays to be processed: %d' % len(assay_list))
    return assay_list
## PRINTOUT
def printout_assay( assay, outfile=sys.stdout, fmt='text' ):
    # Summarize one assay. With fmt == 'tab' a TSV row is written directly to
    # outfile and '' is returned; otherwise a human-readable string is
    # returned and the caller is responsible for printing it.
    if fmt == 'tab':
        outfile.write('%s\t%s\t%f\t%f\t%f\t%d\t%d\t%s\n' %
                ( assay.sample.code, assay.filename,
                assay.score, assay.dp, assay.rss, assay.ladder_peaks,
                len(assay.ladder.alleles), assay.method) )
        return ''
    buf = []
    _ = buf.append
    _( 'Assay: %s -- Sample: %s' % (assay.filename, assay.sample.code) )
    # alignment metrics only exist once the assay has progressed far enough
    if assay.status in ( assaystatus.aligned, assaystatus.called,
                            assaystatus.annotated, assaystatus.binned ):
        _( ' => Score: %3.2f, DP: %5.2f, RSS: %5.2f, N-peak: %d' %
                ( assay.score, assay.dp, assay.rss, assay.ladder_peaks ))
    return '\n'.join( buf )
## parallel word
def do_parallel_find_peaks( channel_list, peakdb ):
    # Fan peak-finding out over a process pool. Results are either stored in
    # the LevelDB cache (pickled, keyed by the channel tag) or, when peakdb
    # is None, dumped to stdout.
    import concurrent.futures, pickle
    cerr('I: Processing channel(s)')
    total = len(channel_list)
    counter = 0
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # executor.map preserves the input order of channel_list
        for (tag, peaks) in executor.map( find_peaks_p, channel_list ):
            if peakdb:
                peakdb.Put(tag.encode(), pickle.dumps(peaks))
            else:
                cout('== channel %s\n' % tag )
                cout(str(peaks))
            counter += 1
            cerr('I: [%d/%d] channel %s => %d peak(s)' % (counter, total, tag, len(peaks)))
def find_peaks_p( args ):
    """Process-pool worker: unpack one (tag, data, param) job and return the
    tag together with the raw peaks found in the data."""
    tag, trace_data, scan_param = args
    peaks = algo.find_raw_peaks(trace_data, scan_param)
    return (tag, peaks)
| lgpl-3.0 |
vmAggies/omniture-master | build/lib/omniture/query.py | 2 | 16229 | # encoding: utf-8
from __future__ import absolute_import
import time
from copy import copy, deepcopy
import functools
from dateutil.relativedelta import relativedelta
import json
import logging
import sys
import pandas as pd
import io
import requests
from .elements import Value
from . import reports
from . import utils
def immutable(method):
    """Decorator: invoke *method* on a clone of the instance and return the
    clone, leaving the original object untouched (enables chainable,
    copy-on-write query building).
    """
    @functools.wraps(method)
    def _apply_on_clone(self, *args, **kwargs):
        duplicate = self.clone()
        method(duplicate, *args, **kwargs)
        return duplicate
    return _apply_on_clone
class Query(object):
    """ Lets you build a query to the Reporting API for Adobe Analytics.
    Methods in this object are chainable. For example
    >>> report = report.element("page").element("prop1").
    metric("pageviews").granularity("day").run()
    Making it easy to create a report.
    To see the raw definition use
    >>> print report
    """
    # NOTE(review): this module is Python 2 only — it uses `print` statements,
    # dict.has_key(), dict.iteritems() and a method literally named `async`
    # (a reserved keyword since Python 3.7).
    # Valid values accepted by granularity().
    GRANULARITY_LEVELS = ['hour', 'day', 'week', 'month', 'quarter', 'year']
    def __init__(self, suite):
        """ Setup the basic structure of the report query. """
        self.log = logging.getLogger(__name__)
        self.suite = suite
        # self.raw accumulates the reportDescription sent to the API
        self.raw = {}
        #Put the report suite in so the user can print
        #the raw query and have it work as is
        self.raw['reportSuiteID'] = str(self.suite.id)
        # self.id is set by queue() once the report is submitted
        self.id = None
        self.report = reports.Report
        self.method = "Get"
        # used only by the data-warehouse path in probe()
        self.data_frame = None
        self.appended_data = []
    def _normalize_value(self, value, category):
        # Accept either a ready-made Value object or a plain id/name that can
        # be looked up on the suite under `category` (e.g. 'metrics').
        if isinstance(value, Value):
            return value
        else:
            return getattr(self.suite, category)[value]
    def _serialize_value(self, value, category):
        # Normalize then convert to the wire (dict) representation.
        return self._normalize_value(value, category).serialize()
    def _serialize_values(self, values, category):
        # Serialize a single value or a list of values into a list of dicts.
        if not isinstance(values, list):
            values = [values]
        return [self._serialize_value(value, category) for value in values]
    def _serialize(self, obj):
        # Recursively serialize Value objects; anything else passes through.
        if isinstance(obj, list):
            return [self._serialize(el) for el in obj]
        elif isinstance(obj, Value):
            return obj.serialize()
        else:
            return obj
    def clone(self):
        """ Return a copy of the current object. """
        # Note: only a shallow copy of self.raw — nested lists are shared.
        query = Query(self.suite)
        query.raw = copy(self.raw)
        query.report = self.report
        return query
    @immutable
    def range(self, start, stop=None, months=0, days=0, granularity=None):
        """
        Define a date range for the report.
        * start -- The start date of the report. If stop is not present
            it is assumed to be the to and from dates.
        * stop (optional) -- the end date of the report (inclusive).
        * months (optional, named) -- months to run used for relative dates
        * days (optional, named)-- days to run used for relative dates
        * granularity (optional, named) -- set the granularity for the report
        """
        start = utils.date(start)
        stop = utils.date(stop)
        if days or months:
            # relative range: end date computed from start
            stop = start + relativedelta(days=days-1, months=months)
        else:
            stop = stop or start
        # NOTE(review): both branches below are identical; the single-day
        # special case (commented 'date' key) is effectively disabled.
        if start == stop:
            #self.raw['date'] = start.isoformat()
            self.raw.update({
                'dateFrom': start.isoformat(),
                'dateTo': stop.isoformat(),
            })
        else:
            self.raw.update({
                'dateFrom': start.isoformat(),
                'dateTo': stop.isoformat(),
            })
        if granularity:
            self.raw = self.granularity(granularity).raw
        return self
    @immutable
    def granularity(self, granularity):
        """
        Set the granularity for the report.
        Values are one of the following
        'hour', 'day', 'week', 'month', 'quarter', 'year'
        """
        if granularity not in self.GRANULARITY_LEVELS:
            levels = ", ".join(self.GRANULARITY_LEVELS)
            raise ValueError("Granularity should be one of: " + levels)
        self.raw['dateGranularity'] = granularity
        return self
    @immutable
    def set(self, key=None, value=None, **kwargs):
        """
        Set a custom property in the report
        `set` is a way to add raw properties to the request,
        for features that python-omniture does not support but the
        SiteCatalyst API does support. For convenience's sake,
        it will serialize Value and Element objects but will
        leave any other kind of value alone.
        """
        if key and value:
            self.raw[key] = self._serialize(value)
        elif key or kwargs:
            # key may itself be a dict of properties
            properties = key or kwargs
            for key, value in properties.items():
                self.raw[key] = self._serialize(value)
        else:
            raise ValueError("Query#set requires a key and value, \
                             a properties dictionary or keyword arguments.")
        return self
    @immutable
    def filter(self, segment=None, segments=None, disable_validation=False, **kwargs):
        """ Set Add a segment to the report. """
        # It would appear to me that 'segment_id' has a strict subset
        # of the functionality of 'segments', but until I find out for
        # sure, I'll provide both options.
        # NOTE(review): dict.has_key() is Python 2 only.
        if not self.raw.has_key('segments'):
            self.raw['segments'] = []
        if disable_validation == False:
            # validated path: segment ids are resolved against the suite
            if segments:
                self.raw['segments'].append(self._serialize_values(segments, 'segments'))
            elif segment:
                self.raw['segments'].append({"id":self._normalize_value(segment,
                                                                        'segments').id})
            elif kwargs:
                self.raw['segments'].append(kwargs)
            else:
                raise ValueError()
        else:
            # unvalidated path: caller-supplied values are passed through
            if segments:
                self.raw['segments'].append(segments)
            elif segment:
                self.raw['segments'].append({"id":segment})
            elif kwargs:
                self.raw['segments'].append(kwargs)
            else:
                raise ValueError()
        return self
    @immutable
    def element(self, element, disable_validation=False, **kwargs):
        """
        Add an element to the report.
        This method is intended to be called multiple time. Each time it will
        add an element as a breakdown
        After the first element, each additional element is considered
        a breakdown
        """
        if self.raw.get('elements', None) == None:
            self.raw['elements'] = []
        if disable_validation == False:
            element = self._serialize_value(element, 'elements')
        else:
            element = {"id":element}
        # kwargs is always a dict here, so the extra attributes are merged in
        if kwargs != None:
            element.update(kwargs)
        self.raw['elements'].append(deepcopy(element))
        #TODO allow this method to accept a list
        return self
    @immutable
    def source(self, source, disable_validation=False, **kwargs):
        """
        Add a source to the report.
        This method is intended to datawarehouse as the source of the report. This came as part of 1.4
        but it has not been documented in Adobe analytics documents
        """
        # NOTE(review): the empty-list default is immediately overwritten by
        # the assignment below; 'source' ends up a scalar, not a list.
        if self.raw.get('source', None) == None:
            self.raw['source'] = []
        self.raw['source'] = source
        return self
    @immutable
    def breakdown(self, element, **kwargs):
        """ Pass through for element. Adds an element to the report. """
        return self.element(element, **kwargs)
    def elements(self, *args, **kwargs):
        """ Shortcut for adding multiple elements. Doesn't support arguments """
        obj = self
        for e in args:
            obj = obj.element(e, **kwargs)
        return obj
    @immutable
    def metric(self, metric, disable_validation=False):
        """
        Add an metric to the report.
        This method is intended to be called multiple time.
        Each time a metric will be added to the report
        """
        if self.raw.get('metrics', None) == None:
            self.raw['metrics'] = []
        if disable_validation == False:
            self.raw['metrics'].append(self._serialize_value(metric, 'metrics'))
        else:
            self.raw['metrics'].append({"id":metric})
        #self.raw['metrics'] = self._serialize_values(metric, 'metrics')
        #TODO allow this metric to accept a list
        return self
    def metrics(self, *args, **kwargs):
        """ Shortcut for adding multiple metrics """
        obj = self
        for m in args:
            obj = obj.metric(m, **kwargs)
        return obj
    @immutable
    def sortBy(self, metric):
        """ Specify the sortBy Metric """
        self.raw['sortBy'] = metric
        return self
    @immutable
    def currentData(self):
        """ Set the currentData flag """
        self.raw['currentData'] = True
        return self
    # TODO: data warehouse reports are a work in progress
    @immutable
    def data(self, metrics, breakdowns):
        # Switch the query over to a Data Warehouse report.
        self.report = reports.DataWarehouseReport
        self.raw['metrics'] = self._serialize_values(metrics, 'metrics')
        # TODO: haven't figured out how breakdowns work yet
        self.raw['breakdowns'] = False
        return self
    def build(self):
        """ Return the report description as an object """
        if self.report == reports.DataWarehouseReport:
            # Data Warehouse uses a different key naming scheme
            return utils.translate(self.raw, {
                'metrics': 'Metric_List',
                'breakdowns': 'Breakdown_List',
                'dateFrom': 'Date_From',
                'dateTo': 'Date_To',
                # is this the correct mapping?
                'date': 'Date_Preset',
                'dateGranularity': 'Date_Granularity',
            })
        else:
            return {'reportDescription': self.raw}
    def queue(self):
        """ Submits the report to the Queue on the Adobe side. """
        q = self.build()
        self.log.debug("Suite Object: %s  Method: %s, Query %s",
                       self.suite, self.report.method, q)
        # remember the server-side report id for later polling/cancelling
        self.id = self.suite.request('Report',
                                     self.report.method,
                                     q)['reportID']
        return self
    def probe(self, fn, heartbeat=None, interval=1, soak=False):
        """ Evaluate the response of a report"""
        # Polls fn() until the report is ready, with exponential backoff
        # (capped at 30s). For warehouse reports, pages of CSV are fetched
        # in a 10s inner loop and concatenated into one DataFrame.
        # NOTE(review): self.raw['source'] raises KeyError when source() was
        # never called — that KeyError is NOT caught by the except below.
        status = 'not ready'
        while status == 'not ready':
            if heartbeat:
                heartbeat()
            time.sleep(interval)
            import json as js
            dict_Value = None
            flag = False
            loop_timeout = 1
            total_row_count = 0
            #Loop until the report is done
            #(No longer raises the ReportNotReadyError)
            try:
                response = fn()
                if self.raw['source'] == 'warehouse':
                    self.data_frame = pd.read_csv(io.StringIO(response.text))
                    while flag == False:
                        time.sleep(10)
                        try:
                            response = fn()
                            if response.status_code == 400:
                                dict_Value = js.loads(response.content)
                                # 'no_warehouse_data' means the next page is
                                # not ready yet; give up after ~30 retries
                                if dict_Value['error'] == 'no_warehouse_data':
                                    loop_timeout = loop_timeout + 1
                                    if loop_timeout > 30:
                                        break
                                    else:
                                        continue
                                else:
                                    flag = True
                                    break
                            elif response.status_code == 200:
                                df2 = pd.read_csv(io.StringIO(response.text))
                                total_row_count = total_row_count + len(df2)
                                print total_row_count
                                self.appended_data.append(df2)
                        except requests.ConnectionError as e:
                            # transient network error: retry the same page
                            continue
                    #appending the dataframes
                    self.appended_data.append(self.data_frame)
                    self.appended_data = pd.concat(self.appended_data, axis=0)
                    dict_Value = js.loads(response.content)
                    if dict_Value['error'] == 'eof_or_invalid_page':
                        status = 'done'
                        print total_row_count
                        return self.appended_data
                else:
                    status = 'done'
                    return response
            except reports.ReportNotReadyError:
                status = 'not ready'
            #  if not soak and status not in ['not ready', 'done', 'ready']:
            #raise reports.InvalidReportError(response)
            #Use a back off up to 30 seconds to play nice with the APIs
            if interval < 30:
                interval = round(interval * 1.5)
            else:
                interval = 30
            self.log.debug("Check Interval: %s seconds", interval)
    # only for SiteCatalyst queries
    def sync(self, heartbeat=None, interval=1):
        """ Run the report synchronously,"""
        if not self.id:
            self.queue()
        # this looks clunky, but Omniture sometimes reports a report
        # as ready when it's really not
        get_report = lambda: self.suite.request('Report',
                                                'Get',
                                                {'reportID': self.id})
        response = self.probe(get_report, heartbeat, interval)
        return self.report(response, self)
    #shortcut to run a report immediately
    def run(self, defaultheartbeat=True, heartbeat=None, interval=1):
        """Shortcut for sync(). Runs the current report synchronously. """
        # by default print progress dots via self.heartbeat
        if defaultheartbeat == True:
            rheartbeat = self.heartbeat
        else:
            rheartbeat = heartbeat
        return self.sync(rheartbeat, interval)
    def heartbeat(self):
        """ A default heartbeat method that prints a dot for each request """
        sys.stdout.write('.')
        sys.stdout.flush()
    # only for SiteCatalyst queries
    def async(self, callback=None, heartbeat=None, interval=1):
        # Not implemented; only queues the report.
        # NOTE(review): 'async' is a reserved keyword from Python 3.7 on.
        if not self.id:
            self.queue()
        raise NotImplementedError()
    # only for Data Warehouse queries
    def request(self, name='python-omniture query', ftp=None, email=None):
        # Placeholder for the Data Warehouse delivery request.
        raise NotImplementedError()
    def cancel(self):
        """ Cancels a the report from the Queue on the Adobe side. """
        if self.report == reports.DataWarehouseReport:
            return self.suite.request('DataWarehouse',
                                      'CancelRequest',
                                      {'Request_Id': self.id})
        else:
            return self.suite.request('Report',
                                      'CancelReport',
                                      {'reportID': self.id})
    def json(self):
        """ Return a JSON string of the Request """
        return str(json.dumps(self.build(), indent=4, separators=(',', ': ')))
    def __str__(self):
        return self.json()
    def _repr_html_(self):
        """ Format in HTML for iPython Users """
        # NOTE(review): dict.iteritems() is Python 2 only.
        html = "Current Report Settings</br>"
        for key, value in self.raw.iteritems():
            html += "<b>{0}</b>: {1} </br>".format(key, value)
        if self.id:
            html += "This report has been submitted</br>"
            html += "<b>{0}</b>: {1} </br>".format("ReportId", self.id)
        return html
    def __dir__(self):
        """ Give sensible options for Tab Completion mostly for iPython """
        return ['async','breakdown','cancel','clone','currentData', 'element', 'source',
                'filter', 'granularity', 'id','json' ,'metric', 'queue', 'range', 'raw', 'report',
                'request', 'run', 'set', 'sortBy', 'suite']
| mit |
borismarin/genesis2.4gamma | Scripts/gpython-tools/plotVm.py | 1 | 2028 | #!/usr/bin/env python
# plotVm ver 0.5 - a command line utility to plot a wildcarded argument
# list of files containing membrane potential data, and plots them in
# different colors on the same axes
import sys, os
import matplotlib.pyplot as plt
import numpy as np
def plot_file(file,format):
    # Plot one two-column (x y) space-separated data file onto the shared
    # module-level 'axes' using the given matplotlib format string.
    # NOTE(review): Python 2 syntax (print statement); the file handle is
    # never closed, and x/y are plotted as strings — matplotlib coerces
    # them, but converting to float would be safer. Also splitting on a
    # single space breaks on multi-space-aligned columns — confirm format.
    print 'Plotting %s' % file
    x = []; y = []
    fp = open(file, 'r')
    for line in fp.readlines():
        data = line.split(" ")
        x.append(data[0]); y.append(data[1])
    # print "Data length is ", len(x), "Format is ", format
    axes.plot(x, y, format)
    # use this instead, to let pyplot plot pick new colors
    # axes.plot(x, y)
def do_plot_files(filenames):
if len(filenames) > 0:
formats = ['k', 'r', 'b', 'g', 'm', 'c']
plotnum = 0
for file in filenames:
# print file
format = formats[plotnum % len(formats)]
# print format, plotnum
try:
if os.path.exists(file):
plot_file(file,format)
plotnum = plotnum + 1
else:
print '*** Error: Incorrect file name or path specified ***'
# I need to do better error handling!
except:
print 'An error ocurred'
sys.exit()
else:
print "No files were specified for plotting!"
print "Please give one or more filenames as arguments, e.g.\n"
print " plotVm Vm.out pyr4*.out\n"
sys.exit()
if __name__ == "__main__":
    # Get the arguments (possibly wildcarded) into a list of filenames
    filenames = sys.argv[1:]
    # NOTE: Python 2 print statement; 'axes' below is consumed as a
    # module-level global by plot_file().
    print filenames
    # create the plot
    fig = plt.figure()
    axes = fig.add_subplot(111)
    do_plot_files(filenames)
    axes.set_title('Membrane Potential')
    axes.set_xlabel('seconds')
    axes.set_ylabel('Volts')
    # fixed y-range for typical membrane potentials (Volts)
    axes.axis(ymin=-0.1, ymax=0.05)
    # to use autoscaling
    # axes.axis('auto')
    # to add a legend
    # axes.legend(filenames)
    plt.draw()
    plt.show()
scienceopen/CVutils | DemoMedianFilter.py | 1 | 1893 | #!/usr/bin/env python
import cv2
import numpy as np
from skimage.util import random_noise
from matplotlib.pyplot import figure, show
from typing import Tuple
def gen_patterns(
    x: int, y: int, dtype=np.uint8, noise: float = 0.0
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate a pair of (y, x) synthetic test images for filter demos.

    The first image contains a vertical and a horizontal line, several small
    blobs and a checkered patch, optionally corrupted with salt & pepper
    noise; the second contains a single 3x3 square plus one extra pixel.

    Parameters
    ----------
    x, y : image width and height in pixels
    dtype : output dtype, which determines the "on" value V
        (255 for uint8, 1 for float types, 65535 for int/uint16)
    noise : salt & pepper noise fraction; 0 disables noise
    """
    if dtype == np.uint8:
        V = 255
    elif dtype in (float, np.float32, np.float64):
        V = 1
    elif dtype in (int, np.uint16):
        V = 65535
    else:
        raise TypeError(dtype)

    im = np.zeros((y, x), dtype=dtype)
    im[18:29, 5] = V  # vert line
    im[15, 18:29] = V  # horiz line
    # assorted small blobs
    im[4:7, 4:6] = V
    im[4:6, 24:27] = V
    im[4:7, 14:17] = V
    im[4:6, 9:11] = V
    im[6, 10] = V
    # checkerboard patch
    im[20:25:2, 20:25:2] = V
    im[21:24:2, 21:24:2] = V
    if noise > 0:
        # bug fix: cast back to the requested dtype, not hard-coded uint8,
        # so non-uint8 outputs keep their dtype and V is not truncated
        im = random_noise(im, 's&p', amount=noise).astype(dtype) * V
    # bug fix: im2 was always uint8, which wrapped V for uint16/int dtypes
    im2 = np.zeros((y, x), dtype=dtype)
    im2[4:7, 4:7] = V
    im2[4, 8] = V
    return im, im2
def plot_panel(fg, im: np.ndarray):
    # Render a 1x4 comparison panel on figure `fg`:
    # original | median filtered | eroded | erosion of the median-filtered image
    ax = fg.add_subplot(1, 4, 1)
    ax.imshow(im, cmap='gray_r', interpolation='none', origin='bottom')
    ax.set_title('original')
    # 3x3 median filter
    imfilt = cv2.medianBlur(im, 3)
    ax = fg.add_subplot(1, 4, 2)
    ax.imshow(imfilt, cmap='gray_r', interpolation='none', origin='bottom')
    ax.set_title('median filtered')
    openrad = 3
    # elliptical structuring element for the morphological erosion
    kern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (openrad, openrad))
    ax = fg.add_subplot(1, 4, 3)
    ax.imshow(cv2.erode(im, kern), cmap='gray_r', interpolation='none', origin='bottom')
    ax.set_title('erosion')
    ax = fg.add_subplot(1, 4, 4)
    ax.imshow(
        cv2.erode(imfilt, kern), cmap='gray_r', interpolation='none', origin='bottom'
    )
    ax.set_title('erosion median filtered')
    # for a in ax:
    #    a.set_xlim((0, im.shape[1]))
# Demo driver: render both noise-free synthetic patterns through the panel.
im1, im2 = gen_patterns(32, 32, np.uint8, 0.0)
plot_panel(figure(), im1)
plot_panel(figure(), im2)
show()
| mit |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 44 | 19373 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
  """Scale each row of `x` to unit Euclidean norm (along the last axis)."""
  row_norms = np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
  return x / row_norms
def cosine_similarity(x, y):
  """Pairwise cosine similarity between the rows of `x` and the rows of `y`."""
  x_unit = normalize(x)
  y_unit = normalize(y)
  return np.dot(x_unit, np.transpose(y_unit))
def make_random_centers(num_centers, num_dims, center_norm=500):
  """Draw `num_centers` random centers in [0, center_norm]^num_dims, rounded
  to whole numbers (float32)."""
  raw = np.random.rand(num_centers, num_dims).astype(np.float32)
  return np.round(raw * center_norm)
def make_random_points(centers, num_points, max_offset=20):
  """Sample `num_points` points near randomly chosen rows of `centers`.

  Returns (points, center assignments, squared offset per point).
  """
  num_centers, num_dims = centers.shape
  assignments = np.random.choice(num_centers, num_points)
  offsets = np.round(
      np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
  points = centers[assignments] + offsets
  squared_offsets = np.add.reduce(offsets * offsets, 1)
  return (points, assignments, squared_offsets)
class KMeansTestBase(test.TestCase):
  # Shared fixtures for the KMeans estimator tests: an input_fn factory plus
  # overridable batching knobs (full-batch by default).
  def input_fn(self, batch_size=None, points=None, randomize=None,
               num_epochs=None):
    """Returns an input_fn that randomly selects batches from given points."""
    batch_size = batch_size or self.batch_size
    points = points if points is not None else self.points
    num_points = points.shape[0]
    if randomize is None:
      # randomize only for single-step mini-batch training
      randomize = (self.use_mini_batch and
                   self.mini_batch_steps_per_iteration <= 1)
    def _fn():
      x = constant_op.constant(points)
      # full-batch case: no sampling needed
      if batch_size == num_points:
        return input_lib.limit_epochs(x, num_epochs=num_epochs), None
      if randomize:
        # NOTE(review): random_uniform's maxval is exclusive, so index
        # num_points-1 is never drawn — confirm the off-by-one is intended.
        indices = random_ops.random_uniform(
            constant_op.constant([batch_size]),
            minval=0, maxval=num_points-1,
            dtype=dtypes.int32,
            seed=10)
      else:
        # We need to cycle through the indices sequentially. We create a queue
        # to maintain the list of indices.
        q = data_flow_ops.FIFOQueue(self.num_points, dtypes.int32, ())
        # Conditionally initialize the Queue.
        def _init_q():
          with ops.control_dependencies([q.enqueue_many(
              math_ops.range(self.num_points))]):
            return control_flow_ops.no_op()
        init_q = control_flow_ops.cond(q.size() <= 0,
                                       _init_q,
                                       control_flow_ops.no_op)
        with ops.control_dependencies([init_q]):
          offsets = q.dequeue_many(self.batch_size)
        # re-enqueue the dequeued indices so the cycle repeats
        with ops.control_dependencies([q.enqueue_many(offsets)]):
          indices = array_ops.identity(offsets)
      batch = array_ops.gather(x, indices)
      return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
    return _fn
  @staticmethod
  def config(tf_random_seed):
    # RunConfig with a deterministic graph-level seed.
    return run_config.RunConfig(tf_random_seed=tf_random_seed)
  @property
  def batch_size(self):
    # Default: full-batch training.
    return self.num_points
  @property
  def use_mini_batch(self):
    # Subclasses override to exercise mini-batch training.
    return False
  @property
  def mini_batch_steps_per_iteration(self):
    return 1
class KMeansTest(KMeansTestBase):
  def setUp(self):
    # Fix the RNG so the synthetic clusters are reproducible across runs.
    np.random.seed(3)
    self.num_centers = 5
    self.num_dims = 2
    self.num_points = 1000
    self.true_centers = make_random_centers(self.num_centers, self.num_dims)
    self.points, _, self.scores = make_random_points(self.true_centers,
                                                     self.num_points)
    # total squared distance of every point from its generating center
    self.true_score = np.add.reduce(self.scores)
  def _kmeans(self, relative_tolerance=None):
    # Build the estimator under test: k-means++ init, squared Euclidean
    # distance, and a fixed random seed for reproducibility.
    return kmeans_lib.KMeansClustering(
        self.num_centers,
        initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
        distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        random_seed=24,
        relative_tolerance=relative_tolerance)
  def test_clusters(self):
    # After one training step the cluster centers tensor must already have
    # the expected (num_centers, num_dims) shape.
    kmeans = self._kmeans()
    kmeans.fit(input_fn=self.input_fn(), steps=1)
    clusters = kmeans.clusters()
    self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
  def test_fit(self):
    kmeans = self._kmeans()
    kmeans.fit(input_fn=self.input_fn(), steps=1)
    score1 = kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    # More training must strictly reduce the clustering score, ending within
    # 5% of the known true (generated) score.
    steps = 10 * self.num_points // self.batch_size
    kmeans.fit(input_fn=self.input_fn(), steps=steps)
    score2 = kmeans.score(
        input_fn=self.input_fn(batch_size=self.num_points), steps=1)
    self.assertTrue(score1 > score2)
    self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(clusters,
num_points)
# Test predict
assignments = list(kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) - 2 * np.dot(
points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
class MiniBatchKMeansTest(KMeansTest):
  """Runs the KMeansTest suite in mini-batch mode with batch size 50."""

  @property
  def batch_size(self):
    return 50

  @property
  def use_mini_batch(self):
    return True
class FullBatchAsyncKMeansTest(KMeansTest):
  """KMeansTest variant where mini-batch steps sum to one full batch.

  batch_size * mini_batch_steps_per_iteration == num_points, so each
  iteration aggregates updates over the entire dataset.
  """

  @property
  def batch_size(self):
    return 50

  @property
  def use_mini_batch(self):
    return True

  @property
  def mini_batch_steps_per_iteration(self):
    return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
  """Tests KMeansClustering with the cosine distance metric."""

  def setUp(self):
    # Two hand-built groups of points: four near the x-axis, four near the
    # y-axis, so the cosine-distance clusters are unambiguous.
    self.points = np.array(
        [[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
         [0.1, 3], [0.2, 4]],
        dtype=np.float32)
    self.num_points = self.points.shape[0]
    # Reference centers: unit-normalized mean of each group's normalized
    # points (the cosine-distance centroid).
    self.true_centers = np.array(
        [
            normalize(
                np.mean(
                    normalize(self.points)[0:4, :], axis=0, keepdims=True))[0],
            normalize(
                np.mean(
                    normalize(self.points)[4:, :], axis=0, keepdims=True))[0]
        ],
        dtype=np.float32)
    self.true_assignments = np.array([0] * 4 + [1] * 4)
    # Total cosine distance = n - sum of cosine similarities.
    self.true_score = len(self.points) - np.tensordot(
        normalize(self.points), self.true_centers[self.true_assignments])
    self.num_centers = 2
    self.kmeans = kmeans_lib.KMeansClustering(
        self.num_centers,
        initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
        distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        config=self.config(3))

  def test_fit(self):
    """Fitted centers match the analytic cosine centroids."""
    max_steps = 10 * self.num_points // self.batch_size
    self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
    centers = normalize(self.kmeans.clusters())
    # Sort both sets of centers so the comparison is order-independent.
    centers = centers[centers[:, 0].argsort()]
    true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
    self.assertAllClose(centers, true_centers, atol=0.04)

  def test_transform(self):
    """transform() equals 1 - cosine_similarity to the fitted centers."""
    self.kmeans.fit(input_fn=self.input_fn(), steps=10)
    centers = normalize(self.kmeans.clusters())
    true_transform = 1 - cosine_similarity(self.points, centers)
    transform = self.kmeans.transform(input_fn=self.input_fn(
        batch_size=self.num_points))
    self.assertAllClose(transform, true_transform, atol=1e-3)

  def test_predict(self):
    """Predicted assignments, centers and score agree with the references."""
    max_steps = 10 * self.num_points // self.batch_size
    self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
    centers = normalize(self.kmeans.clusters())

    assignments = list(self.kmeans.predict_cluster_idx(
        input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
    # Compare assigned centers (robust to cluster index permutation).
    self.assertAllClose(
        centers[assignments],
        self.true_centers[self.true_assignments],
        atol=1e-2)

    centers = centers[centers[:, 0].argsort()]
    true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
    self.assertAllClose(centers, true_centers, atol=0.04)

    score = self.kmeans.score(input_fn=self.input_fn(
        batch_size=self.num_points), steps=1)
    self.assertAllClose(score, self.true_score, atol=1e-2)

  def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
    # the less populated centers.
    points = np.array(
        [[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
         [-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
         [-3., -3.]],
        dtype=np.float32)
    # Analytic cosine centroids for the three groups (2 + 2 + 8 points).
    true_centers = np.array(
        [
            normalize(
                np.mean(
                    normalize(points)[0:2, :], axis=0, keepdims=True))[0],
            normalize(
                np.mean(
                    normalize(points)[2:4, :], axis=0, keepdims=True))[0],
            normalize(np.mean(
                normalize(points)[4:, :], axis=0, keepdims=True))[0]
        ],
        dtype=np.float32)
    true_assignments = [0] * 2 + [1] * 2 + [2] * 8
    true_score = len(points) - np.tensordot(
        normalize(points), true_centers[true_assignments])
    kmeans = kmeans_lib.KMeansClustering(
        3,
        initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
        distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
        use_mini_batch=self.use_mini_batch,
        mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
        config=self.config(3))
    kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
    centers = normalize(kmeans.clusters())
    self.assertAllClose(
        sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)

    def _input_fn():
      # Single-epoch input so predict_cluster_idx terminates.
      return (
          input_lib.limit_epochs(constant_op.constant(points), num_epochs=1),
          None)
    assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
    self.assertAllClose(
        centers[assignments], true_centers[true_assignments], atol=1e-2)
    score = kmeans.score(
        input_fn=lambda: (constant_op.constant(points), None), steps=1)
    self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
  """Runs the cosine-distance tests in mini-batch mode with batch size 2."""

  @property
  def batch_size(self):
    return 2

  @property
  def use_mini_batch(self):
    return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
  """Cosine-distance tests with mini-batch steps summing to one full batch."""

  @property
  def batch_size(self):
    return 2

  @property
  def use_mini_batch(self):
    return True

  @property
  def mini_batch_steps_per_iteration(self):
    # One iteration aggregates updates over all points.
    return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
  """Base class for benchmarks."""

  def SetUp(self,
            dimension=50,
            num_clusters=50,
            points_per_cluster=10000,
            center_norm=500,
            cluster_width=20):
    """Creates a deterministic random dataset shared by the benchmark runs.

    Args:
      dimension: Dimensionality of the generated points.
      num_clusters: Number of true centers to generate.
      points_per_cluster: Number of points generated per center.
      center_norm: Passed to `make_random_centers` as `center_norm`.
      cluster_width: Passed to `make_random_points` as `max_offset`.
    """
    np.random.seed(123456)
    self.num_clusters = num_clusters
    self.num_points = num_clusters * points_per_cluster
    self.centers = make_random_centers(
        self.num_clusters, dimension, center_norm=center_norm)
    self.points, _, scores = make_random_points(
        self.centers, self.num_points, max_offset=cluster_width)
    # Reference score, reported alongside the fitted scores.
    self.score = float(np.sum(scores))

  def _report(self, num_iters, start, end, scores):
    """Publishes average wall time and per-iteration scores."""
    print(scores)
    self.report_benchmark(
        iters=num_iters,
        wall_time=(end - start) / num_iters,
        extras={'true_sum_squared_distances': self.score,
                'fit_scores': scores})

  def _fit(self, num_iters=10):
    """Runs the clustering implementation; overridden by subclasses."""
    pass

  def benchmark_01_2dim_5center_500point(self):
    self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
    self._fit()

  def benchmark_02_20dim_20center_10kpoint(self):
    self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint(self):
    self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
    self._fit()

  def benchmark_03_100dim_50center_50kpoint_unseparated(self):
    # Wide clusters: points overlap between clusters.
    self.SetUp(
        dimension=100,
        num_clusters=50,
        points_per_cluster=1000,
        cluster_width=250)
    self._fit()

  def benchmark_04_100dim_500center_500kpoint(self):
    self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
    self._fit(num_iters=4)

  def benchmark_05_100dim_500center_500kpoint_unseparated(self):
    self.SetUp(
        dimension=100,
        num_clusters=500,
        points_per_cluster=1000,
        cluster_width=250)
    self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
  """Benchmarks the TensorFlow KMeansClustering estimator."""

  def _fit(self, num_iters=10):
    """Fits TF k-means `num_iters` times and reports timings and scores."""
    scores = []
    start = time.time()
    for i in range(num_iters):
      print('Starting tensorflow KMeans: %d' % i)
      tf_kmeans = kmeans_lib.KMeansClustering(
          self.num_clusters,
          initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
          kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
          random_seed=i * 42,  # different seed per trial
          relative_tolerance=1e-6,
          config=run_config.RunConfig(tf_random_seed=3))
      tf_kmeans.fit(input_fn=lambda: (constant_op.constant(self.points), None),
                    steps=50)
      _ = tf_kmeans.clusters()
      scores.append(
          tf_kmeans.score(
              input_fn=lambda: (constant_op.constant(self.points), None),
              steps=1))
    self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
  """Benchmarks scikit-learn's KMeans on the shared random dataset."""

  def _fit(self, num_iters=10):
    """Fits sklearn KMeans `num_iters` times and reports timing and inertia."""
    inertias = []
    started_at = time.time()
    for trial in range(num_iters):
      print('Starting sklearn KMeans: %d' % trial)
      model = SklearnKMeans(
          n_clusters=self.num_clusters,
          init='k-means++',
          max_iter=50,
          n_init=1,
          tol=1e-4,
          random_state=trial * 42)
      model.fit(self.points)
      inertias.append(model.inertia_)
    self._report(num_iters, started_at, time.time(), inertias)
class KMeansTestQueues(test.TestCase):
  """Checks k-means training with queue-based input pipelines."""

  def input_fn(self):
    """Returns an input_fn whose batches are produced by a QueueRunner."""

    def _fn():
      # A queue of constant zero batches; the QueueRunner keeps it filled.
      queue = data_flow_ops.FIFOQueue(capacity=10,
                                      dtypes=dtypes.float32,
                                      shapes=[10, 3])
      enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
      queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
                                                             [enqueue_op]))
      return queue.dequeue(), None

    return _fn

  # This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
  # is generated using a QueueRunner, one has to make sure that these runners
  # are started before the initialization.
  def test_queues(self):
    kmeans = kmeans_lib.KMeansClustering(5)
    kmeans.fit(input_fn=self.input_fn(), steps=1)
# Run all test cases and benchmarks in this module.
if __name__ == '__main__':
  test.main()
| mit |
shikhardb/scikit-learn | examples/tree/plot_tree_regression.py | 40 | 1470 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: 80 noisy samples of sin(x) on [0, 5).
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# Perturb every fifth target to simulate noisy observations.
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit two regression trees of different depth to the same data.
shallow_tree = DecisionTreeRegressor(max_depth=2)
deep_tree = DecisionTreeRegressor(max_depth=5)
shallow_tree.fit(X, y)
deep_tree.fit(X, y)

# Predict on a dense grid so the piecewise-constant fits render smoothly.
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_shallow = shallow_tree.predict(X_test)
y_deep = deep_tree.predict(X_test)

# Plot the data and both fits; the deep tree visibly overfits the noise.
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_shallow, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_deep, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/arrays/boolean/test_construction.py | 6 | 12857 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
def test_boolean_array_constructor():
    """BooleanArray accepts only 1D boolean ndarrays for values and mask."""
    data = np.array([True, False, True, False], dtype="bool")
    na_mask = np.array([False, False, False, True], dtype="bool")

    result = BooleanArray(data, na_mask)
    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    # Non-ndarray or wrongly-typed inputs are rejected.
    with pytest.raises(TypeError, match="values should be boolean numpy array"):
        BooleanArray(data.tolist(), na_mask)
    with pytest.raises(TypeError, match="mask should be boolean numpy array"):
        BooleanArray(data, na_mask.tolist())
    with pytest.raises(TypeError, match="values should be boolean numpy array"):
        BooleanArray(data.astype(int), na_mask)
    with pytest.raises(TypeError, match="mask should be boolean numpy array"):
        BooleanArray(data, None)

    # Multi-dimensional inputs are rejected as well.
    with pytest.raises(ValueError, match="values must be a 1D array"):
        BooleanArray(data.reshape(1, -1), na_mask)
    with pytest.raises(ValueError, match="mask must be a 1D array"):
        BooleanArray(data, na_mask.reshape(1, -1))
def test_boolean_array_constructor_copy():
    """copy=False (default) keeps the input buffers; copy=True duplicates them."""
    data = np.array([True, False, True, False], dtype="bool")
    na_mask = np.array([False, False, False, True], dtype="bool")

    shared = BooleanArray(data, na_mask)
    assert shared._data is data
    assert shared._mask is na_mask

    copied = BooleanArray(data, na_mask, copy=True)
    assert copied._data is not data
    assert copied._mask is not na_mask
def test_to_boolean_array():
    """pd.array(..., dtype="boolean") accepts lists, bool and object ndarrays."""
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, False])
    )
    for source in (
        [True, False, True],
        np.array([True, False, True]),
        np.array([True, False, True], dtype=object),
    ):
        result = pd.array(source, dtype="boolean")
        tm.assert_extension_array_equal(result, expected)

    # A None entry becomes a masked (missing) value.
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, True])
    )
    for source in (
        [True, False, None],
        np.array([True, False, None], dtype=object),
    ):
        result = pd.array(source, dtype="boolean")
        tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_all_none():
    """An all-None input yields a fully masked BooleanArray."""
    expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))
    for source in ([None, None, None], np.array([None, None, None], dtype=object)):
        tm.assert_extension_array_equal(pd.array(source, dtype="boolean"), expected)
@pytest.mark.parametrize(
    "a, b",
    [
        ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
        ([True, np.nan], [True, None]),
        ([True, pd.NA], [True, None]),
        ([np.nan, np.nan], [None, None]),
        (np.array([np.nan, np.nan], dtype=float), [None, None]),
    ],
)
def test_to_boolean_array_missing_indicators(a, b):
    """All NA indicators (None, np.nan, pd.NA) coerce to the same mask."""
    result = pd.array(a, dtype="boolean")
    expected = pd.array(b, dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
    "values",
    [
        ["foo", "bar"],
        ["1", "2"],
        # "foo",
        [1, 2],
        [1.0, 2.0],
        pd.date_range("20130101", periods=2),
        np.array(["foo"]),
        np.array([1, 2]),
        np.array([1.0, 2.0]),
        [np.nan, {"a": 1}],
    ],
)
def test_to_boolean_array_error(values):
    """Inputs that are not bool-like raise TypeError."""
    # error in converting existing arrays to BooleanArray
    msg = "Need to pass bool-like value"
    with pytest.raises(TypeError, match=msg):
        pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
    """Integer ndarrays of 0/1 coerce to nullable boolean."""
    result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, False], dtype="boolean")
    )

    # A None entry makes the source an object ndarray; it maps to NA.
    result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, None], dtype="boolean")
    )
def test_to_boolean_array_from_float_array():
    """Float ndarrays of 0.0/1.0 coerce to nullable boolean; NaN maps to NA."""
    result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, False], dtype="boolean")
    )

    # np.nan is treated as a missing value.
    result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
    tm.assert_extension_array_equal(
        result, pd.array([True, False, True, None], dtype="boolean")
    )
def test_to_boolean_array_integer_like():
    """Python ints 0/1 (and None) coerce to nullable boolean values."""
    cases = [
        ([1, 0, 1, 0], [True, False, True, False]),
        # None stays missing alongside integer values.
        ([1, 0, 1, None], [True, False, True, None]),
    ]
    for raw, as_bool in cases:
        tm.assert_extension_array_equal(
            pd.array(raw, dtype="boolean"), pd.array(as_bool, dtype="boolean")
        )
def test_coerce_to_array():
    """coerce_to_array copy semantics, mixed NA sources, and dimension checks."""
    # TODO this is currently not public API
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    # Default: no copy — the returned buffers are the inputs themselves.
    result = BooleanArray(*coerce_to_array(values, mask=mask))
    expected = BooleanArray(values, mask)
    tm.assert_extension_array_equal(result, expected)
    assert result._data is values
    assert result._mask is mask
    # copy=True must allocate fresh buffers.
    result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
    expected = BooleanArray(values, mask)
    tm.assert_extension_array_equal(result, expected)
    assert result._data is not values
    assert result._mask is not mask

    # mixed missing from values and mask: the None in values and the True in
    # mask are combined into a single result mask.
    values = [True, False, None, False]
    mask = np.array([False, False, False, True], dtype="bool")
    result = BooleanArray(*coerce_to_array(values, mask=mask))
    expected = BooleanArray(
        np.array([True, False, True, True]), np.array([False, False, True, True])
    )
    tm.assert_extension_array_equal(result, expected)
    result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
    tm.assert_extension_array_equal(result, expected)
    result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
    tm.assert_extension_array_equal(result, expected)

    # raise errors for wrong dimension
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    with pytest.raises(ValueError, match="values must be a 1D list-like"):
        coerce_to_array(values.reshape(1, -1))
    with pytest.raises(ValueError, match="mask must be a 1D list-like"):
        coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
    """Coercing an existing BooleanArray is zero-copy unless copy=True."""
    # passing BooleanArray to coerce_to_array
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    arr = BooleanArray(values, mask)
    result = BooleanArray(*coerce_to_array(arr))
    tm.assert_extension_array_equal(result, arr)
    # no copy
    assert result._data is arr._data
    assert result._mask is arr._mask
    result = BooleanArray(*coerce_to_array(arr), copy=True)
    tm.assert_extension_array_equal(result, arr)
    assert result._data is not arr._data
    assert result._mask is not arr._mask
    # A separate mask cannot be combined with a BooleanArray input.
    with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
        coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
    """np.array(BooleanArray) yields object dtype; bool dtype needs no NAs."""
    # with missing values -> object dtype
    arr = pd.array([True, False, None], dtype="boolean")
    result = np.array(arr)
    expected = np.array([True, False, pd.NA], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    # also with no missing values -> object dtype
    arr = pd.array([True, False, True], dtype="boolean")
    result = np.array(arr)
    expected = np.array([True, False, True], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    # force bool dtype
    result = np.array(arr, dtype="bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)
    # with missing values will raise error
    arr = pd.array([True, False, None], dtype="boolean")
    msg = (
        "cannot convert to 'bool'-dtype NumPy array with missing values. "
        "Specify an appropriate 'na_value' for this dtype."
    )
    with pytest.raises(ValueError, match=msg):
        np.array(arr, dtype="bool")
def test_to_boolean_array_from_strings():
    """String literals ("True"/"1"/"1.0"/...) parse to boolean; NaN masks."""
    # NOTE(review): _from_sequence_of_strings is private API — its signature
    # differs across pandas versions; verify before upgrading.
    result = BooleanArray._from_sequence_of_strings(
        np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object)
    )
    expected = BooleanArray(
        np.array([True, False, True, True, False, False, False]),
        np.array([False, False, False, False, False, False, True]),
    )
    tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_strings_invalid_string():
    """A string that is not a recognized boolean literal raises ValueError."""
    with pytest.raises(ValueError, match="cannot be cast"):
        BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
    """to_numpy dtype/na_value combinations for Series and BooleanArray."""
    con = pd.Series if box else pd.array
    # default (with or without missing values) -> object dtype
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, True], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, pd.NA], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype="str")
    expected = np.array([True, False, pd.NA], dtype="<U5")
    tm.assert_numpy_array_equal(result, expected)
    # no missing values -> can convert to bool, otherwise raises
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype="bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)
    arr = con([True, False, None], dtype="boolean")
    with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
        result = arr.to_numpy(dtype="bool")
    # specify dtype and na_value
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype=object, na_value=None)
    expected = np.array([True, False, None], dtype="object")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype=bool, na_value=False)
    expected = np.array([True, False, False], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype="int64", na_value=-99)
    expected = np.array([1, 0, -99], dtype="int64")
    tm.assert_numpy_array_equal(result, expected)
    result = arr.to_numpy(dtype="float64", na_value=np.nan)
    expected = np.array([1, 0, np.nan], dtype="float64")
    tm.assert_numpy_array_equal(result, expected)
    # converting to int or float without specifying na_value raises
    with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
        arr.to_numpy(dtype="int64")
    with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
        arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
    """to_numpy(dtype=bool) is zero-copy unless copy=True is requested."""
    arr = pd.array([True, False, True], dtype="boolean")
    view = arr.to_numpy(dtype=bool)
    view[0] = False
    # Mutating the zero-copy view is reflected in the source array.
    tm.assert_extension_array_equal(
        arr, pd.array([False, False, True], dtype="boolean")
    )

    arr = pd.array([True, False, True], dtype="boolean")
    independent = arr.to_numpy(dtype=bool, copy=True)
    independent[0] = False
    # With copy=True the source array is left untouched.
    tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| bsd-3-clause |
sandeepdsouza93/TensorFlow-15712 | tensorflow/contrib/learn/python/learn/experiment.py | 5 | 16349 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import server_lib
__all__ = ["Experiment"]
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
# TODO(ispir): remove delay_workers_by_global_step and make global step based
# waiting as only behaviour.
  @deprecated_arg_values(
      "2016-10-23",
      "local_eval_frequency is deprecated as local_run will be renamed to "
      "train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
      "instead. Note, however, that the default for min_eval_frequency is 1, "
      "meaning models will be evaluated every time a new checkpoint is "
      "available. In contrast, the default for local_eval_frequency is None, "
      "resulting in evaluation occurring only after training has completed. "
      "min_eval_frequency is ignored when calling the deprecated local_run.",
      local_eval_frequency=None)
  def __init__(self,
               estimator,
               train_input_fn,
               eval_input_fn,
               eval_metrics=None,
               train_steps=None,
               eval_steps=100,
               train_monitors=None,
               local_eval_frequency=None,
               eval_delay_secs=120,
               continuous_eval_throttle_secs=60,
               min_eval_frequency=1,
               delay_workers_by_global_step=False):
    """Constructor for `Experiment`.

    Creates an Experiment instance. None of the functions passed to this
    constructor are executed at construction time. They are stored and used
    when a method is executed which requires it.

    Args:
      estimator: Object implementing `Trainable` and `Evaluable`.
      train_input_fn: function, returns features and labels for training.
      eval_input_fn: function, returns features and labels for evaluation. If
        `eval_steps` is `None`, this should be configured only to produce for a
        finite number of batches (generally, 1 epoch over the evaluation data).
      eval_metrics: `dict` of string, metric function. If `None`, default set
        is used.
      train_steps: Perform this many steps of training. `None`, the default,
        means train forever.
      eval_steps: `evaluate` runs until input is exhausted (or another exception
        is raised), or for `eval_steps` steps, if specified.
      train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
        function.
      local_eval_frequency: Frequency of running eval in steps,
        when running locally. If `None`, runs evaluation only at the end of
        training.
      eval_delay_secs: Start evaluating after waiting for this many seconds.
      continuous_eval_throttle_secs: Do not re-evaluate unless the last
        evaluation was started at least this many seconds ago for
        continuous_eval().
      min_eval_frequency: (applies only to train_and_evaluate). the minimum
        number of steps between evaluations. Of course, evaluation does not
        occur if no new snapshot is available, hence, this is the minimum.
      delay_workers_by_global_step: if `True` delays training workers
        based on global step instead of time.

    Raises:
      ValueError: if `estimator` does not implement `Evaluable` and `Trainable`.
    """
    # Validate up-front so misuse fails at construction, not mid-training.
    if not isinstance(estimator, evaluable.Evaluable):
      raise ValueError("`estimator` must implement `Evaluable`.")
    if not isinstance(estimator, trainable.Trainable):
      raise ValueError("`estimator` must implement `Trainable`.")
    super(Experiment, self).__init__()
    # Nothing runs here; everything is stored for later use by
    # train()/evaluate()/train_and_evaluate().
    self._estimator = estimator
    self._train_input_fn = train_input_fn
    self._eval_input_fn = eval_input_fn
    self._eval_metrics = eval_metrics
    self._train_steps = train_steps
    self._eval_steps = eval_steps
    self._train_monitors = train_monitors or []
    self._local_eval_frequency = local_eval_frequency
    self._eval_delay_secs = eval_delay_secs
    self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
    self._min_eval_frequency = min_eval_frequency
    self._delay_workers_by_global_step = delay_workers_by_global_step
  @property
  def estimator(self):
    """The `Estimator` object this experiment was constructed with."""
    return self._estimator
  def train(self, delay_secs=None):
    """Fit the estimator using the training data.

    Train the estimator for `self._train_steps` steps, after waiting for
    `delay_secs` seconds. If `self._train_steps` is `None`, train forever.

    Args:
      delay_secs: Start training after this many seconds. If `None`, a
        per-worker delay is derived from the task id (see below).

    Returns:
      The trained estimator.
    """
    start = time.time()
    # Start the server, if needed. It's important to start the server before
    # we (optionally) sleep for the case where no device_filters are set.
    # Otherwise, the servers will wait to connect to each other before starting
    # to train. We might as well start as soon as we can.
    config = self._estimator.config
    if (config.environment != run_config.Environment.LOCAL and
        config.environment != run_config.Environment.GOOGLE and
        config.cluster_spec and config.master):
      self._start_server()
    extra_hooks = []
    if delay_secs is None:
      task_id = self._estimator.config.task_id or 0
      if self._delay_workers_by_global_step:
        # Wait 5500 global steps for the second worker. Each worker waits more
        # than the previous one but with a diminishing number of steps.
        extra_hooks.append(
            basic_session_run_hooks.GlobalStepWaiterHook(
                int(8000.0 * math.log(task_id + 1))))
        delay_secs = 0
      else:
        # Wait 5 secs more for each new worker up to 60 secs.
        delay_secs = min(60, task_id * 5)
    if delay_secs > 0:
      elapsed_secs = time.time() - start
      remaining = delay_secs - elapsed_secs
      # NOTE(review): logs `remaining` but sleeps the full `delay_secs` —
      # confirm whether sleeping `remaining` was intended.
      logging.info("Waiting %d secs before starting training.", remaining)
      time.sleep(delay_secs)
    return self._estimator.fit(input_fn=self._train_input_fn,
                               max_steps=self._train_steps,
                               monitors=self._train_monitors + extra_hooks)
  def evaluate(self, delay_secs=None):
    """Evaluate on the evaluation data.

    Runs evaluation on the evaluation data and returns the result. Runs for
    `self._eval_steps` steps, or if it's `None`, then run until input is
    exhausted or another exception is raised. Start the evaluation after
    `delay_secs` seconds, or if it's `None`, defaults to using
    `self._eval_delay_secs` seconds.

    Args:
      delay_secs: Start evaluating after this many seconds. If `None`, defaults
        to using `self._eval_delay_secs`.

    Returns:
      The result of the `evaluate` call to the `Estimator`.
    """
    if delay_secs is None:
      delay_secs = self._eval_delay_secs
    if delay_secs:
      logging.info("Waiting %d secs before starting eval.", delay_secs)
      time.sleep(delay_secs)
    # Results are written under the "one_pass" evaluation name.
    return self._estimator.evaluate(input_fn=self._eval_input_fn,
                                    steps=self._eval_steps,
                                    metrics=self._eval_metrics,
                                    name="one_pass")
@deprecated(
    "2016-10-23",
    "local_run will be renamed to train_and_evaluate and the new default "
    "behavior will be to run evaluation every time there is a new "
    "checkpoint.")
def local_run(self):
  # Deprecated alias for train_and_evaluate(): temporarily force the
  # evaluation cadence to the local setting, then delegate.  The original
  # `_min_eval_frequency` is restored when the context exits.
  with _new_attr_context(self, "_min_eval_frequency"):
    self._min_eval_frequency = self._local_eval_frequency
    return self.train_and_evaluate()
def _continuous_eval(self,
                     input_fn,
                     name,
                     delay_secs,
                     throttle_delay_secs):
  """Evaluate repeatedly, forever, rate-limited by `throttle_delay_secs`.

  Runs infinite eval on the evaluation data set.  Evaluation starts after
  `delay_secs` seconds and then happens at most once (with
  `self._eval_steps` steps each time) per `throttle_delay_secs`.  This
  method never returns.

  Args:
    input_fn: The input to use for this eval.
    name: A string appended to the folder name of evaluation results.
    delay_secs: Start evaluating after this many seconds.  If None,
      defaults to self._eval_delay_secs.
    throttle_delay_secs: Do not re-evaluate unless the last evaluation was
      started at least this many seconds ago.  If None, defaults to
      self._continuous_eval_throttle_secs.
  """
  delay_secs = self._eval_delay_secs if delay_secs is None else delay_secs
  throttle_delay_secs = (self._continuous_eval_throttle_secs
                         if throttle_delay_secs is None
                         else throttle_delay_secs)

  if delay_secs:
    logging.info("Waiting %f secs before starting eval.", delay_secs)
    time.sleep(delay_secs)

  last_not_fitted_log_time = 0
  while True:
    eval_started = time.time()
    try:
      self._estimator.evaluate(input_fn=input_fn,
                               steps=self._eval_steps,
                               metrics=self._eval_metrics,
                               name=name)
    except NotFittedError:
      # No checkpoint yet; this is expected at startup, so only log the
      # warning once every 10 minutes.
      if time.time() - last_not_fitted_log_time > 600:
        logging.warning("Estimator is not fitted yet. "
                        "Will start an evaluation when a checkpoint will "
                        "be ready.")
        last_not_fitted_log_time = time.time()

    elapsed = time.time() - eval_started
    if elapsed < throttle_delay_secs:
      remaining = throttle_delay_secs - elapsed
      logging.info("Waiting %f secs before starting next eval run.",
                   remaining)
      time.sleep(remaining)
def continuous_eval(self, delay_secs=None, throttle_delay_secs=None):
self._continuous_eval(self._eval_input_fn,
name="continuous",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def continuous_eval_on_train_data(self,
                                  delay_secs=None,
                                  throttle_delay_secs=None):
  # Same as continuous_eval, but evaluates on the TRAINING input instead of
  # the evaluation input; delegates to _continuous_eval, which never
  # returns.
  self._continuous_eval(self._train_input_fn,
                        name="continuous_on_train_data",
                        delay_secs=delay_secs,
                        throttle_delay_secs=throttle_delay_secs)
def train_and_evaluate(self):
  """Interleaves training and evaluation.

  The frequency of evaluation is controlled by the constructor arg
  `min_eval_frequency`. When this parameter is None or 0, evaluation
  happens only after training has completed. Note that evaluation cannot
  happen more frequently than checkpoints are taken. If no new snapshots
  are available when evaluation is supposed to occur, then evaluation
  doesn't happen for another `min_eval_frequency` steps (assuming a
  checkpoint is available at that point). Thus, setting
  `min_eval_frequency` to 1 means that the model will be evaluated
  everytime there is a new checkpoint.

  This is particularly useful for a "Master" task in the cloud, whose
  responsibility it is to take checkpoints, evaluate those checkpoints,
  and write out summaries. Participating in training as the supervisor
  allows such a task to accomplish the first and last items, while
  performing evaluation allows for the second.

  Returns:
    The result of the `evaluate` call to the `Estimator`.
  """
  # The directory to which evaluation summaries are written are determined
  # by adding a suffix to 'eval'; that suffix is the 'name' parameter to
  # the various evaluate(...) methods. By setting it to None, we force
  # the directory name to simply be 'eval'.
  eval_dir_suffix = None

  # We set every_n_steps to 1, but evaluation only occurs when a new
  # snapshot is available. If, by the time we finish evaluation
  # there is a new snapshot, then we just evaluate again. Otherwise,
  # we keep training until one becomes available.
  with _new_attr_context(self, "_train_monitors"):
    # BUG FIX: the previous `self._train_monitors += [...]` extended the
    # caller's list IN PLACE, so the temporary ValidationMonitor survived
    # the _new_attr_context restore whenever the original monitor list was
    # non-empty. Build a fresh list instead so the restore is effective.
    train_monitors = list(self._train_monitors or [])
    if self._min_eval_frequency:
      train_monitors.append(monitors.ValidationMonitor(
          input_fn=self._eval_input_fn, eval_steps=self._eval_steps,
          metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency,
          name=eval_dir_suffix,
      ))
    self._train_monitors = train_monitors
    self.train(delay_secs=0)

  return self._estimator.evaluate(input_fn=self._eval_input_fn,
                                  steps=self._eval_steps,
                                  metrics=self._eval_metrics,
                                  name=eval_dir_suffix)
def run_std_server(self):
  """Starts a TensorFlow server and joins the serving thread.

  Typically used for parameter servers.

  Raises:
    ValueError: if not enough information is available in the estimator's
      config to create a server.
  """
  # join() on the started server blocks forever; this method never returns
  # under normal operation.
  self._start_server().join()
def test(self):
  """Tests training and evaluating the estimator both for a single step.

  Returns:
    The result of the `evaluate` call to the `Estimator`.
  """
  # One fit step followed by a one-step evaluation: a cheap smoke test of
  # the full train/eval plumbing, without the startup delays used by
  # train() and evaluate().
  self._estimator.fit(input_fn=self._train_input_fn,
                      steps=1,
                      monitors=self._train_monitors)

  return self._estimator.evaluate(input_fn=self._eval_input_fn,
                                  steps=1,
                                  metrics=self._eval_metrics,
                                  name="one_pass")
def _start_server(self):
  """Create a `server_lib.Server` from the estimator config and start it.

  Returns:
    The started (but not joined) server.

  Raises:
    ValueError: if the estimator's RunConfig lacks the cluster information
      needed to construct a server.
  """
  config = self._estimator.config
  config_incomplete = (not config.cluster_spec or not config.task_type or
                       not config.master or config.task_id is None)
  if config_incomplete:
    raise ValueError("Could not start server; be sure to specify "
                     "cluster_spec, task_type, master, and task in "
                     "RunConfig or set the TF_CONFIG environment variable.")

  server = server_lib.Server(config.cluster_spec,
                             job_name=config.task_type,
                             task_index=config.task_id,
                             config=config.tf_config,
                             start=False)
  server.start()
  return server
@contextlib.contextmanager
def _new_attr_context(obj, attr):
  """Temporarily allow an object's attribute to be reassigned.

  The attribute's current value is remembered on entry; on exit (normal or
  via exception) it is restored, undoing any reassignment made inside the
  `with` block.

  Example usage:

    my_obj.x = 1
    with _new_attr_context(my_obj, "x"):
      my_obj.x = 2
      print(my_obj.x)  # prints 2
    print(my_obj.x)    # prints 1 again
  """
  original_value = getattr(obj, attr)
  try:
    yield
  finally:
    setattr(obj, attr, original_value)
| apache-2.0 |
aflaxman/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 16 | 12486 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
def _tosequence(X):
    """Return X as a sequence/ndarray, wrapping a lone mapping in a list."""
    if isinstance(X, Mapping):
        # A single dict is one sample; wrap it so callers can always
        # iterate over samples.
        return [X]
    return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.

    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.

    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".

    However, note that this transformer will only do a binary one-hot encoding
    when feature values are of type string. If categorical features are
    represented as numeric values such as int, the DictVectorizer can be
    followed by OneHotEncoder to complete binary one-hot encoding.

    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.

    Read more in the :ref:`User Guide <dict_feature_extraction>`.

    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when
        fitting. True by default.

    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").

    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])

    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
      encoded as columns of integers.
    """

    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort

    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}

        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    # One-hot: a string value becomes a "name=value" feature.
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)

        if self.sort:
            # Keep feature order deterministic; rebuild indices to match.
            feature_names.sort()
            vocab = dict((f, i) for i, f in enumerate(feature_names))

        self.feature_names_ = feature_names
        self.vocabulary_ = vocab

        return self

    def _transform(self, X, fitting):
        # Shared implementation for transform/fit_transform. Builds a CSR
        # matrix incrementally; `fitting` controls whether unseen feature
        # names grow the vocabulary or are ignored.
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")

        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_

        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X

        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []

        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))
                    # else: unseen feature during transform -> silently drop.

            indptr.append(len(indices))

        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")

        indices = np.frombuffer(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))

        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)

        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # Build a permutation mapping new (sorted) positions to the old
            # column indices, and update the vocabulary in the same pass.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]

        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()

        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab

        return result_matrix

    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.

        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)

    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.

        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.

        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.

        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]

        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]

        if sp.issparse(X):
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            # Dense path: only nonzero entries are written back.
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]

        return dicts

    def transform(self, X):
        """Transform feature->value dicts to array or sparse matrix.

        Named features not encountered during fit or fit_transform will be
        silently ignored.

        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)

        else:
            # Dense path: fill a preallocated zeros array directly.
            dtype = self.dtype
            vocab = self.vocabulary_
            X = _tosequence(X)
            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)

            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Feature name not seen during fit: ignore.
                        pass

            return Xa

    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.

        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_

    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.

        This function modifies the estimator in-place.

        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.

        Returns
        -------
        self

        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            # Convert a boolean mask into integer column indices.
            support = np.where(support)[0]

        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)

        self.vocabulary_ = new_vocab
        # Rebuild the name list ordered by the new indices.
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]

        return self
| bsd-3-clause |
kyleabeauchamp/HMCNotes | code/correctness/june11/test_various_hmc.py | 1 | 2454 | import lb_loader
# Benchmark script: equilibrate a test system, run one integrator variant,
# and record convergence statistics to CSV.  (`lb_loader` is imported on the
# preceding line of the original file.)
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems

# Run configuration.
precision = "mixed"
sysname = "chargedswitchedaccurateljbox"

# Load the system plus its recommended run parameters, then equilibrate.
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)

positions, boxes = lb_loader.equilibrate(testsystem, temperature, timestep, steps=equil_steps, minimize=True)

collision_rate = 1E0 / u.picoseconds
n_steps = 1  # Number inside integrator.step()
Neff_cutoff = 1E5  # stop once this effective sample size is reached

# Water box settings
groups = [(0, 2), (1, 1)]
extra_chances = 2
steps_per_hmc = 25
# NOTE(review): hand-tuned override of the loader's timestep — confirm the
# magic constant is intentional.
timestep = 2.4208317230875265 * 1.5 * u.femtoseconds
#timestep = timestep * 1.5

# Pick exactly ONE integrator; the alternatives below are kept for quick
# switching between benchmark variants.
integrator = mm.LangevinIntegrator(temperature, collision_rate, 2.0 * u.femtoseconds)
#integrator = hmc_integrators.HMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep)
#integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, collision_rate=collision_rate)
#integrator = hmc_integrators.XCHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances)
#integrator = hmc_integrators.XCHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep)
#integrator = hmc_integrators.XCGHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep)
#integrator = hmc_integrators.HMCRESPAIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, groups=groups)
#integrator = hmc_integrators.GHMCRESPAIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, groups=groups)
#integrator = hmc_integrators.XCHMCRESPAIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, groups=groups, extra_chances=extra_chances)
#integrator = hmc_integrators.XCGHMCRESPAIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, groups=groups, collision_rate=collision_rate)

# Output file is keyed on precision, system, integrator class, and settings.
itype = type(integrator).__name__

context = lb_loader.build(system, integrator, positions, temperature, precision=precision)

filename = "./data/%s_%s_%s_%.3f_%d.csv" % (precision, sysname, itype, timestep / u.femtoseconds, collision_rate * u.picoseconds)
print(filename)

# Re-equilibrate under the chosen integrator, then run until converged.
integrator.step(equil_steps)
data, g, Neff, mu, sigma, stderr = lb_loader.converge(context, integrator, n_steps=n_steps, Neff_cutoff=Neff_cutoff, filename=filename)
| gpl-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/examples/event_handling/lasso_demo.py | 9 | 2365 | """
Show how to use a lasso to select a set of points and get the indices
of the selected points. A callback is used to change the color of the
selected points
This is currently a proof-of-concept implementation (though it is
usable as is). There will be some refinement of the API.
"""
from matplotlib.widgets import Lasso
from matplotlib.colors import colorConverter
from matplotlib.collections import RegularPolyCollection
from matplotlib import path
import matplotlib.pyplot as plt
from numpy import nonzero
from numpy.random import rand
class Datum(object):
    """A 2-D point that knows the face color for its selection state."""

    colorin = colorConverter.to_rgba('red')
    colorout = colorConverter.to_rgba('blue')

    def __init__(self, x, y, include=False):
        self.x = x
        self.y = y
        # Selected points are drawn with colorin, others with colorout.
        self.color = self.colorin if include else self.colorout
class LassoManager(object):
    """Draw ``Datum`` points and recolor the ones captured by a lasso."""

    def __init__(self, ax, data):
        self.axes = ax
        self.canvas = ax.figure.canvas
        self.data = data

        self.Nxy = len(data)

        facecolors = [d.color for d in data]
        self.xys = [(d.x, d.y) for d in data]
        fig = ax.figure
        # NOTE(review): passing fig.dpi as the first argument matches an old
        # RegularPolyCollection signature — confirm against the installed
        # matplotlib version.
        self.collection = RegularPolyCollection(
            fig.dpi, 6, sizes=(100,),
            facecolors=facecolors,
            offsets = self.xys,
            transOffset = ax.transData)

        ax.add_collection(self.collection)

        self.cid = self.canvas.mpl_connect('button_press_event', self.onpress)

    def callback(self, verts):
        # Recolor each point depending on whether it lies inside the polygon
        # traced by the lasso (verts).
        facecolors = self.collection.get_facecolors()
        p = path.Path(verts)
        ind = p.contains_points(self.xys)
        for i in range(len(self.xys)):
            if ind[i]:
                facecolors[i] = Datum.colorin
            else:
                facecolors[i] = Datum.colorout

        self.canvas.draw_idle()
        # Return the drawing lock and discard the finished lasso widget.
        self.canvas.widgetlock.release(self.lasso)
        del self.lasso

    def onpress(self, event):
        # Ignore the press when another widget holds the draw lock or the
        # click landed outside the axes.
        if self.canvas.widgetlock.locked(): return
        if event.inaxes is None: return
        self.lasso = Lasso(event.inaxes, (event.xdata, event.ydata), self.callback)
        # acquire a lock on the widget drawing
        self.canvas.widgetlock(self.lasso)
if __name__ == '__main__':
    # Demo: scatter 100 random points; lasso-select some to recolor them.
    data = [Datum(*xy) for xy in rand(100, 2)]

    ax = plt.axes(xlim=(0,1), ylim=(0,1), autoscale_on=False)
    lman = LassoManager(ax, data)

    plt.show()
| apache-2.0 |
Mohitsharma44/citibike-challenge | citibike-challenge-aio.py | 2 | 3386 | import pylab as plt
import pandas as pd
import numpy as np
import datetime as dt
def datestr_as_datetime(dstr):
    """Parse a timestamp string like ``2014-01-27 12:28:45`` to a datetime.

    Replaces the hand-rolled split/int parsing with the standard-library
    ``strptime``, which additionally validates field ranges.

    Parameters
    ----------
    dstr : str
        Timestamp in ``YYYY-MM-DD HH:MM:SS`` form.

    Returns
    -------
    datetime.datetime
    """
    return dt.datetime.strptime(dstr, '%Y-%m-%d %H:%M:%S')
cbs = pd.read_csv("./citibike-files/2013-07.csv")

# Fractional hour of day at which each ride started (e.g. 12:30 -> 12.5).
# (An unused array of full datetimes, `pk`, was removed here; it cost a full
# parse of every start time and was never read.)
hh = np.array([float(st.split()[1].split(':')[0]) +
               float(st.split()[1].split(':')[1]) / 60.0
               for st in cbs.starttime])

fig = plt.figure(figsize=(15, 10))

# Top panel: hourly ride counts, stacked by gender code
# (1 = male, 2 = female, 0 = undetermined).
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
ax1.set_title("Peak Hour for July 2013")
ax1.set_xlabel("Hour of the day")
ax1.set_ylabel("Number of People")
ax1.grid()
ax1.set_xticks(range(24))
n, bins, patches = ax1.hist(
    [hh[np.array(cbs.gender) == 1], hh[np.array(cbs.gender) == 2],
     hh[np.array(cbs.gender) == 0]],
    24, stacked=True, label=['male', 'female', 'undetermined'],
    color=['LightSteelBlue', 'SteelBlue', 'MidnightBlue'])
n1, bins1 = np.histogram(hh, 24)
colors = ['pink', 'yellow', 'green', 'orange', 'brown', 'purple']
plt.legend()
# Overlay the mean ridership of each 4-hour window as a wide labeled bar.
for i in range(6):
    ax1.bar(i * 4, np.mean(n1[i * 4:i * 4 + 4]), width=4, alpha=0.5,
            color=colors[i])
    ax1.text(i * 4 + 2, np.mean(n1[i * 4:i * 4 + 4]) + 400,
             '%d' % np.mean(n1[i * 4:i * 4 + 4]), ha="center")
ax1.plot(bins1[:-1] + 0.5, n1, 'r-')

# Bottom-left panel: total rides by gender (codes 1 and 2 only).
n2, b2 = np.histogram(cbs['gender'][cbs['gender'] > 0], 2)
ax2 = plt.subplot2grid((2, 2), (1, 0), colspan=1)
ax2.set_title("Rides by Man vs Women")
ax2.set_ylabel("Number of People")
plt.tick_params(
    axis='x',          # changes apply to the x-axis
    which='both',      # both major and minor ticks are affected
    bottom='off',      # ticks along the bottom edge are off
    top='off',         # ticks along the top edge are off
    labelbottom='off') # labels along the bottom edge are off
ax2.bar(np.array([1, 2]), n2, width=0.5, color='SteelBlue')
ax2.text(0.3, 0.5, 'Male', transform=ax2.transAxes, ha='center')
ax2.text(0.3, 0.3, '%d' % n2[0], transform=ax2.transAxes, ha='center')
ax2.text(0.7, 0.5, 'Female', transform=ax2.transAxes, ha='center')
ax2.text(0.7, 0.3, '%d' % n2[1], transform=ax2.transAxes, ha='center')
ax2.grid()
ax2.xaxis.grid()
ax2.set_xlim((0.5, 3))
ax2.set_ylim((0, max(n2) * 1.5))

# Bottom-right panel: subscribers ("New Yorkers") vs customers ("tourists").
tur = cbs.usertype == 'Customer'
nyc = cbs.usertype == 'Subscriber'
ax3 = plt.subplot2grid((2, 2), (1, 1), colspan=1)
ax3.set_title("NewYorkers vs Tourists")
'''
tick_params(
    axis='x',          # changes apply to the x-axis
    which='both',      # both major and minor ticks are affected
    bottom='off',      # ticks along the bottom edge are off
    top='off',         # ticks along the top edge are off
    labelbottom='off') # labels along the bottom edge are off
ax3.set_ylabel("Number of Customers")
ax3.bar(np.array([1,2]),[sum(nyc),sum(tur)],width=0.5, color='SteelBlue')
ax3.yaxis.grid()
ax3.text(0.3,0.5,'New Yorkers',transform=ax3.transAxes, ha='center')
ax3.text(0.3,0.3,'%d'%sum(nyc),transform=ax3.transAxes, ha='center')
ax3.text(0.7,0.5,'Tourists',transform=ax3.transAxes, ha='center')
ax3.text(0.7,0.3,'%d'%sum(tur),transform=ax3.transAxes, ha='center')
ax3.set_xlim((0.5,3))
ax3.set_ylim((0,max([sum(nyc),sum(tur)])*1.5))
'''
# BUG FIX: the "Tourists" label previously reused sum(nyc); it must show the
# tourist count sum(tur) (the pie fractions below were already correct).
labels = 'New Yorkers\n(%d)' % sum(nyc), 'Tourists\n(%d)' % sum(tur)
fracs = np.array([sum(nyc), sum(tur)])
ax3.pie(fracs, labels=labels,
        autopct='%1.1f%%', shadow=True, startangle=0,
        colors=['SteelBlue', 'purple'])
ax3.axis('equal')
plt.show()
| mit |
gaoce/TimeVis | setup.py | 1 | 1191 | from __future__ import print_function
import os
from setuptools import setup
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a ``with`` block so the file handle is closed deterministically
    instead of being leaked until garbage collection (the previous
    ``open(...).read()`` never closed the file).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fobj:
        return fobj.read()
# Setup
# 1. zip_safe needs to be False since we need access to templates
setup(
    name="TimeVis",
    version="0.2",
    author="Ce Gao",
    author_email="gaoce@coe.neu.edu",
    description=("TimeVis: An interactive tool to query and visualize "
                 "time series gene expression data"),
    license="MIT",
    install_requires=[
        "flask",
        "Flask-RESTful",
        "SQLAlchemy",
        "pandas",
        "scikits.bootstrap",
    ],
    packages=['timevis'],
    package_dir={"timevis": "timevis"},
    # Non-Python assets shipped inside the package (see zip_safe note above).
    package_data={
        "timevis": [
            "db/*.db",
            "static/images/*",
            "static/js/*.js",
            "static/js/lib/*.js",
            "static/css/*.css",
            "static/css/lib/*.css",
            "static/css/lib/images/*",
            "templates/*.html",
        ]
    },
    long_description=read('README.md'),
    # Installs a `timevis` console command mapped to timevis.run:main.
    entry_points={'console_scripts': ['timevis = timevis.run:main']},
    zip_safe=False,
)
| mit |
larsoner/mne-python | mne/decoding/tests/test_transformer.py | 7 | 9311 | # Author: Mainak Jas <mainak@neuro.hut.fi>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pytest
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal)
from mne import io, read_events, Epochs, pick_types
from mne.decoding import (Scaler, FilterEstimator, PSDEstimator, Vectorizer,
UnsupervisedSpatialFilter, TemporalFilter)
from mne.defaults import DEFAULTS
from mne.utils import requires_sklearn, run_tests_if_main, check_version
# Epoch window in seconds, relative to each event onset.
tmin, tmax = -0.2, 0.5
# Event codes used throughout the tests (left auditory / left visual).
event_id = dict(aud_l=1, vis_l=3)
# NOTE(review): start/stop appear unused within this module — confirm.
start, stop = 0, 8

# Small test recording shipped with mne (fif raw file plus its events).
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
def test_scaler():
    """Test methods of Scaler."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # small channel subset keeps the test fast

    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    y = epochs.events[:, -1]

    # Each scaling method paired with the info object it requires (the
    # statistical methods take no info).
    methods = (None, dict(mag=5, grad=10, eeg=20), 'mean', 'median')
    infos = (epochs.info, epochs.info, None, None)
    epochs_data_t = epochs_data.transpose([1, 0, 2])
    for method, info in zip(methods, infos):
        if method in ('mean', 'median') and not check_version('sklearn'):
            # Without sklearn these methods must fail with ImportError.
            with pytest.raises(ImportError, match='No module'):
                Scaler(info, method)
            continue
        scaler = Scaler(info, method)
        X = scaler.fit_transform(epochs_data, y)
        assert_equal(X.shape, epochs_data.shape)
        # Reconstruct the expected per-channel means/stds for each method
        # so the transform can be inverted analytically below.
        if method is None or isinstance(method, dict):
            sd = DEFAULTS['scalings'] if method is None else method
            stds = np.zeros(len(picks))
            for key in ('mag', 'grad'):
                stds[pick_types(epochs.info, meg=key)] = 1. / sd[key]
            stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg']
            means = np.zeros(len(epochs.ch_names))
        elif method == 'mean':
            stds = np.array([np.std(ch_data) for ch_data in epochs_data_t])
            means = np.array([np.mean(ch_data) for ch_data in epochs_data_t])
        else:  # median
            percs = np.array([np.percentile(ch_data, [25, 50, 75])
                              for ch_data in epochs_data_t])
            stds = percs[:, 2] - percs[:, 0]
            means = percs[:, 1]
        assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis],
                        epochs_data, rtol=1e-12, atol=1e-20, err_msg=method)

        # fit().transform() must match fit_transform().
        X2 = scaler.fit(epochs_data, y).transform(epochs_data)
        assert_array_equal(X, X2)

        # inverse_transform
        Xi = scaler.inverse_transform(X)
        assert_array_almost_equal(epochs_data, Xi)

    # Test init exception
    pytest.raises(ValueError, Scaler, None, None)
    pytest.raises(TypeError, scaler.fit, epochs, y)
    pytest.raises(TypeError, scaler.transform, epochs)
    epochs_bad = Epochs(raw, events, event_id, 0, 0.01, baseline=None,
                        picks=np.arange(len(raw.ch_names)))  # non-data chs
    scaler = Scaler(epochs_bad.info, None)
    pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(), y)
def test_filterestimator():
    """Test methods of FilterEstimator."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # small channel subset keeps the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()

    # Add tests for different combinations of l_freq and h_freq
    # Band-pass: shape preserved and fit/transform == fit_transform.
    filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80)
    y = epochs.events[:, -1]
    X = filt.fit_transform(epochs_data, y)
    assert (X.shape == epochs_data.shape)
    assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)

    # Low-pass only.
    filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40,
                           filter_length='auto',
                           l_trans_bandwidth='auto', h_trans_bandwidth='auto')
    y = epochs.events[:, -1]
    X = filt.fit_transform(epochs_data, y)

    # Degenerate band (l_freq == h_freq) must raise, with a warning about
    # the filter being longer than the signal.
    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
    y = epochs.events[:, -1]
    with pytest.warns(RuntimeWarning, match='longer than the signal'):
        pytest.raises(ValueError, filt.fit_transform, epochs_data, y)

    # High-pass only.
    filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None,
                           filter_length='auto',
                           l_trans_bandwidth='auto', h_trans_bandwidth='auto')
    X = filt.fit_transform(epochs_data, y)

    # Test init exception
    pytest.raises(ValueError, filt.fit, epochs, y)
    pytest.raises(ValueError, filt.transform, epochs)
def test_psdestimator():
    """Test methods of PSDEstimator."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # small channel subset keeps the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    psd = PSDEstimator(2 * np.pi, 0, np.inf)
    y = epochs.events[:, -1]
    X = psd.fit_transform(epochs_data, y)

    # PSD output keeps one row per epoch, and fit().transform() must match
    # fit_transform().
    assert (X.shape[0] == epochs_data.shape[0])
    assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)

    # Test init exception
    pytest.raises(ValueError, psd.fit, epochs, y)
    pytest.raises(ValueError, psd.transform, epochs)
def test_vectorizer():
    """Test Vectorizer."""
    data = np.random.rand(150, 18, 6)
    vect = Vectorizer()
    result = vect.fit_transform(data)
    assert_equal(result.ndim, 2)

    # check inverse_transform round-trips back to the original 3-D data
    orig_data = vect.inverse_transform(result)
    assert_equal(orig_data.ndim, 3)
    assert_array_equal(orig_data, data)
    assert_array_equal(vect.inverse_transform(result[1:]), data[1:])

    # check with different shape (4-D input flattens to 18*6*3 = 324 cols)
    assert_equal(vect.fit_transform(np.random.rand(150, 18, 6, 3)).shape,
                 (150, 324))
    assert_equal(vect.fit_transform(data[1:]).shape, (149, 108))

    # check if raised errors are working correctly
    vect.fit(np.random.rand(105, 12, 3))
    pytest.raises(ValueError, vect.transform, np.random.rand(105, 12, 3, 1))
    pytest.raises(ValueError, vect.inverse_transform,
                  np.random.rand(102, 12, 12))
@requires_sklearn
def test_unsupervised_spatial_filter():
    """Test unsupervised spatial filter."""
    from sklearn.decomposition import PCA
    from sklearn.kernel_ridge import KernelRidge
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # small channel subset keeps the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)

    # Test estimator: a non-transformer estimator must be rejected.
    pytest.raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))

    # Test fit
    X = epochs.get_data()
    n_components = 4
    usf = UnsupervisedSpatialFilter(PCA(n_components))
    usf.fit(X)
    usf1 = UnsupervisedSpatialFilter(PCA(n_components))

    # test transform: output stays 3-D with n_components channels
    assert_equal(usf.transform(X).ndim, 3)
    # test fit_transform matches fit().transform()
    assert_array_almost_equal(usf.transform(X), usf1.fit_transform(X))
    assert_equal(usf.transform(X).shape[1], n_components)
    assert_array_almost_equal(usf.inverse_transform(usf.transform(X)), X)

    # Test with average param
    usf = UnsupervisedSpatialFilter(PCA(4), average=True)
    usf.fit_transform(X)
    # `average` must be a bool; an int is rejected.
    pytest.raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
def test_temporal_filter():
    """Test methods of TemporalFilter.

    Exercises invalid constructor/argument combinations, valid band-pass /
    low-pass / high-pass settings on 3-D data, type checking of transform
    input, and filtering of 2-D arrays.
    """
    X = np.random.rand(5, 5, 1200)
    # Test init test: each tuple is an invalid (l_freq, h_freq, sfreq,
    # l_trans_bandwidth) combination that must raise on fit_transform
    values = (('10hz', None, 100., 'auto'), (5., '10hz', 100., 'auto'),
              (10., 20., 5., 'auto'), (None, None, 100., '5hz'))
    for low, high, sf, ltrans in values:
        filt = TemporalFilter(low, high, sf, ltrans, fir_design='firwin')
        pytest.raises(ValueError, filt.fit_transform, X)
    # Add tests for different combinations of l_freq and h_freq
    for low, high in ((5., 15.), (None, 15.), (5., None)):
        filt = TemporalFilter(low, high, sfreq=100., fir_design='firwin')
        Xt = filt.fit_transform(X)
        # filtering is deterministic and shape-preserving
        assert_array_equal(filt.fit_transform(X), Xt)
        assert (X.shape == Xt.shape)
    # Test fit and transform numpy type check
    with pytest.raises(ValueError, match='Data to be filtered must be'):
        filt.transform([1, 2])
    # Test with 2 dimensional data array
    X = np.random.rand(101, 500)
    filt = TemporalFilter(l_freq=25., h_freq=50., sfreq=1000.,
                          filter_length=150, fir_design='firwin2')
    assert_equal(filt.fit_transform(X).shape, X.shape)
run_tests_if_main()  # MNE test-runner hook: executes this module's tests when run as a script
| bsd-3-clause |
DR08/mxnet | example/svm_mnist/svm_mnist.py | 44 | 4094 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch MNIST dataset, add some noise, as the article suggests,
# permute and assign the examples to be used on our network
# NOTE(review): fetch_mldata was removed in scikit-learn 0.22; newer
# environments need sklearn.datasets.fetch_openml('mnist_784') instead.
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input and separate train set and test set
# NOTE(review): X holds PCA projections plus Gaussian noise here, not raw
# 0-255 pixels, so the /255 only rescales the features -- confirm intent.
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name='svm_label')
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name='svm_label')
# Here we instantiate and fit the model for our data
# The article actually suggests using 400 epochs,
# but that was reduced to 10 for convenience
mod = mx.mod.Module(
    context = mx.cpu(0),  # Run on CPU 0
    symbol = mlp,         # Use the network we just defined
    label_names = ['svm_label'],
)
mod.fit(
    train_data=train_iter,
    eval_data=test_iter,  # Testing data set. MXNet computes scores on test set every epoch
    batch_end_callback = mx.callback.Speedometer(batch_size, 200),  # Logging module to print out progress
    num_epoch = 10,       # Train for 10 epochs
    optimizer_params = {
        'learning_rate': 0.1,  # Learning rate
        'momentum': 0.9,       # Momentum for SGD with momentum
        'wd': 0.00001,         # Weight decay for regularization
    },
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how well the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
mikehankey/fireball_camera | analyze-stacks.py | 1 | 55222 | #!/usr/bin/python3
# next steps. save off a cache of the diffs,so we can save time on multiple re-runs.
# do a crop cnt confirm.
# script to make master stacks per night and hour from the 1 minute stacks
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
video_dir = "/mnt/ams2/SD/"  # root of the SD capture tree (deployment path convention -- verify on host)
def stack_stack(pic1, pic2):
    """Blend a frame onto a running stack with a per-pixel "lighter" merge.

    *pic1* is a numpy frame; *pic2* is the accumulated PIL image, or None
    when starting a new stack.  Returns the updated PIL stack image.
    """
    frame = Image.fromarray(pic1)
    if pic2 is None:
        return frame
    return ImageChops.lighter(pic2, frame)
def compute_straight_line(x1,y1,x2,y2,x3,y3):
    """Measure how collinear three points are.

    Computes the slope from point 1 to point 2 and from point 1 to point 3
    and returns the absolute difference between them: 0 means perfectly
    collinear, and callers in this module treat values <= 1 as "straight".
    A vertical run (zero dx) falls back to a slope of 0 instead of dividing
    by zero, as in the original implementation.
    """
    print ("COMP STRAIGHT", x1,y1,x2,y2,x3,y3)
    slope_12 = (y2 - y1) / (x2 - x1) if x2 - x1 != 0 else 0
    slope_13 = (y3 - y1) / (x3 - x1) if x3 - x1 != 0 else 0
    # BUG FIX: the original returned the signed difference, so a strongly
    # negative difference (e.g. -50) wrongly passed the callers' "st <= 1"
    # straightness test; return the magnitude instead.  (The original also
    # computed an unused "Y"/"N" flag, removed here.)
    return abs(slope_12 - slope_13)
def crop_center(img,cropx,cropy):
    """Cut a cropx-by-cropy window from the middle of a 2-D image.

    The window is shifted 12 px right and 4 px down from the exact centre
    (a fixed offset carried over from the original code).
    """
    height, width = img.shape
    left = width // 2 - cropx // 2 + 12
    top = height // 2 - cropy // 2 + 4
    return img[top:top + cropy, left:left + cropx]
def fig2data ( fig ):
    """
    @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array (h, w, 4) of RGBA values
    """
    # draw the renderer
    fig.canvas.draw ( )
    # Get the RGBA buffer from the figure
    w,h = fig.canvas.get_width_height()
    # BUG FIX: np.fromstring is deprecated (and removed) for raw binary
    # data -- use np.frombuffer, and copy because the buffer is read-only.
    buf = np.frombuffer ( fig.canvas.tostring_argb(), dtype=np.uint8 ).copy()
    # BUG FIX: the buffer is h rows of w pixels, so the shape must be
    # (h, w, 4); the original (w, h, 4) scrambled non-square canvases.
    buf.shape = ( h, w, 4 )
    # canvas.tostring_argb gives the pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll ( buf, 3, axis = 2 )
    return buf
def kmeans_cluster2(points, num_clusters):
    """Cluster points with KMeans, then merge or re-split based on distances.

    Sorts the points, runs KMeans with *num_clusters*, and uses
    cluster_dist() to count how many cluster pairs are farther apart than
    the merge threshold; depending on that count it recursively retries
    with one fewer cluster.  Returns (new_clusters, clust_d) where clust_d
    holds pairwise cluster-distance tuples.
    """
    #print (points)
    #points = np.array((points))
    points = sorted(points, key=lambda x: x[1])
    clusters = []
    cluster_points = []
    # group points in 3 clusters first
    est = KMeans(n_clusters=num_clusters)
    est.fit(points)
    # NOTE(review): this dict comprehension is evaluated and discarded (no-op)
    ({i: np.where(est.labels_ == i)[0] for i in range(est.n_clusters)})
    for i in set(est.labels_):
        index = est.labels_ == i
        cluster_idx = np.where(est.labels_ == i)
        for idxg in cluster_idx:
            for idx in idxg:
                idx = int(idx)
                point = points[idx]
                #print ("IDX:",i, idx, point)
                cluster_points.append(point)
        clusters.append(cluster_points)
        cluster_points = []
    # find distance from each cluster to the other. If close group together.
    lcx = None
    lcy = None
    cn = 1
    new_clusters, clust_d, dst_ot = cluster_dist(clusters)
    #print ("NUM CLUSTERS / CLUSTERS OVER DIST THRESH", num_clusters, dst_ot)
    # recurse with one fewer cluster when the over-threshold count suggests
    # the current split is too fine (or too coarse)
    if dst_ot < num_clusters -1 and num_clusters > 1:
        clusters, clust_d = kmeans_cluster2(points, num_clusters -1)
    if dst_ot > num_clusters and num_clusters > 1:
        clusters, clust_d = kmeans_cluster2(points, num_clusters -1)
    clust_d = sorted(clust_d, key=lambda x: x[1])
    #print (clust_d)
    new_clusters, clust_d, dst_ot = cluster_dist(clusters)
    print ("CLUSTERS", clusters)
    print ("NEW CLUSTERS", new_clusters)
    return(new_clusters, clust_d)
def cluster_center(cluster):
    """Return (cx, cy, mx, my, mnx, mny) for a cluster of point pairs.

    Column 1 is averaged as cx and column 0 as cy -- note this is the
    transpose of the (x, y) tuple order used elsewhere in this module;
    preserved as-is.
    """
    pts = np.array(cluster)
    col_x = pts[:, 1]
    col_y = pts[:, 0]
    return (np.average(col_x), np.average(col_y),
            np.max(col_x), np.max(col_y),
            np.min(col_x), np.min(col_y))
def merge_clusters(clusters):
    # NOTE(review): dead stub -- it only prints and is shadowed by the
    # two-argument merge_clusters(c1, c2) defined later in this module.
    print ("YO")
def cluster_dist ( clusters):
    """Compare cluster centres pairwise and decide which clusters to merge.

    Clusters whose centres are closer than 50 px are marked for merging;
    pairs farther apart increment dst_ot and record a distance tuple.
    All marked clusters are merged into ONE combined cluster, unmerged
    clusters pass through unchanged.  Returns (clusters, cluster_dist,
    dst_ot) -- NOTE(review): the ORIGINAL cluster list is returned, not
    new_clusters, even though new_clusters is built and printed.
    """
    cluster_dist = []
    cn = 0
    dst_ot = 0          # count of cluster pairs beyond the merge threshold
    unmerged_clusters = []
    merged_clusters = []
    new_clusters = []
    for i in range(0,len(clusters)) :
        merged = 0
        cluster = clusters[i]
        cx,cy,mx,my,mnx,mny = cluster_center(cluster)
        print ("CLUSTER ", i, " POINTS: ", len(cluster), cx,cy)
        for j in range(i+1,len(clusters)) :
            cluster2 = clusters[j]
            ccx, ccy, mx2,my2,mnx2,mny2 = cluster_center(cluster2)
            dist = calc_dist(cx, cy, ccx,ccy)
            dist_mn_mx = calc_dist(mx, my, mnx2,mny2)
            print ("Distance between cluster ", i, " and ", j, " is ", dist)
            if dist < 50:
                print ("Cluster ", j, " is close and should be merged.")
                #temp = merge_clusters(cluster, cluster2)
                merged_clusters.append(i)
                merged_clusters.append(j)
                merged = 1
            else:
                dst_ot += 1
                cluster_dist.append ((cx, cy, ccx,ccy, dist))
        #if merged == 0:
        #    unmerged_clusters.append((i))
    print ("Merged Clusters:", merged_clusters)
    # classify every cluster as merged or unmerged
    for cc in range(0, len(clusters)):
        merge = 0
        for mc in merged_clusters:
            if (cc == mc):
                print ("Cluster ", cc, " should be merged.")
                merge = 1
        if merge == 0 and cc not in merged_clusters:
            print ("Cluster ", cc, " should not be merged.")
            unmerged_clusters.append((cc))
        if cc in merged_clusters:
            print ("Cluster ", cc, " was already merged.")
    new_clusters = []
    merged = []
    # pool the points of every marked cluster into one combined cluster
    for cid in merged_clusters:
        print ("MERGED CID: ", cid, len(merged_clusters))
        for x,y in clusters[cid]:
            merged.append((x,y))
    if len(merged) > 0:
        new_clusters.append(merged)
    print ("NEW CLUSTERS STEP 1:", new_clusters)
    for cid in unmerged_clusters:
        print ("UNMERGED CID: ", cid, len(unmerged_clusters))
        new_clusters.append(clusters[cid])
    print ("Merged Clusters: ", merged_clusters)
    print ("Unmerged Clusters: ", unmerged_clusters)
    print ("New Clusters: ", new_clusters)
    print("Original Clusters", len(clusters))
    print("New Clusters", len(new_clusters))
    return (clusters, cluster_dist, dst_ot)
def merge_clusters(c1,c2):
    """Concatenate the (x, y) points of two clusters into one new list."""
    combined = [(x, y) for x, y in c1]
    combined.extend((x, y) for x, y in c2)
    return combined
def kmeans_cluster(points, num_clusters):
    """Cluster (x, y, w, h) points with KMeans and plot them to /tmp/plot.png.

    Groups the points by KMeans label, draws a scatter plus a linear fit per
    cluster with matplotlib, saves the figure to /tmp/plot.png (read back
    later by find_objects2), and returns the list of clusters.
    """
    points = np.array(points)
    print(points)
    clusters = []
    cluster_points = []
    colors = ('r', 'g', 'b')
    est = KMeans(n_clusters=num_clusters)
    est.fit(points)
    # NOTE(review): this dict comprehension is evaluated and discarded (no-op)
    ({i: np.where(est.labels_ == i)[0] for i in range(est.n_clusters)})
    for i in set(est.labels_):
        index = est.labels_ == i
        cluster_idx = np.where(est.labels_ == i)
        for idxg in cluster_idx:
            for idx in idxg:
                idx = int(idx)
                point = points[idx]
                #print ("IDX:",i, idx, point)
                cluster_points.append(point)
        clusters.append(cluster_points)
        cluster_points = []
    print (est.labels_)
    print (len(points))
    # NOTE(review): the grouping loop below is an exact duplicate of the one
    # above, so every cluster is appended to `clusters` twice; preserved as-is.
    ({i: np.where(est.labels_ == i)[0] for i in range(est.n_clusters)})
    for i in set(est.labels_):
        index = est.labels_ == i
        cluster_idx = np.where(est.labels_ == i)
        for idxg in cluster_idx:
            for idx in idxg:
                idx = int(idx)
                point = points[idx]
                #print ("IDX:",i, idx, point)
                cluster_points.append(point)
        clusters.append(cluster_points)
        cluster_points = []
    #print(points[:,0])
    #print(points[:,1])
    int_lb = est.labels_.astype(float)
    #fig = gcf()
    fig = Figure()
    canvas = FigureCanvas(fig)
    plot = fig.add_subplot(1,1,1)
    plot.scatter(points[:,0], points[:,1], c=[plt.cm.Spectral(float(i) / 10) for i in est.labels_])
    # draw a degree-1 polyfit line through each cluster with enough points
    for cluster in clusters:
        cxs = []
        cys = []
        for cp in cluster:
            x,y,w,h = cp
            cxs.append(x)
            cys.append(y)
        if len(cxs) > 3:
            plot.plot(np.unique(cxs), np.poly1d(np.polyfit(cxs, cys, 1))(np.unique(cxs)))
    plt.xlim(0,640)
    plt.ylim(0,480)
    plot.invert_yaxis()
    fig.canvas.draw()
    fig.savefig("/tmp/plot.png", dpi=fig.dpi)
    #plt.show()
    return(clusters)
def calc_dist(x1,y1,x2,y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    return math.hypot(x2 - x1, y2 - y1)
def find_angle(x1,x2,y1,y2):
    """Angle in degrees of the slope between two points (0 for vertical runs).

    NOTE(review): the declared parameter order is (x1, x2, y1, y2), yet
    callers in this module pass (x1, y1, x2, y2) positionally; the behavior
    is preserved exactly as the original.
    """
    run = x2 - x1
    slope = (y2 - y1) / run if run != 0 else 0
    return math.degrees(math.atan(slope))
def closest_node(node, nodes):
    """Return the member of *nodes* with the smallest Euclidean distance to *node*."""
    distances = cdist([node], nodes)
    return nodes[distances.argmin()]
def find_objects(index, points):
    """Group contour points into line segments, line groups, stars and big blobs.

    Given (x, y, w, h) contour boxes, pairs each point with nearby points:
    close pairs become line segments (candidate moving objects), segments
    with similar angles are chained into line groups, isolated points become
    stars, and boxes larger than 15 px are collected as big_cnts.  A final
    straightness pass is delegated to conf_objs().  Returns
    (line_groups, orphan_lines, stars, obj_points, big_cnts).
    """
    apoints = []
    unused_points = []
    cl_sort = []
    sorted_points = []
    last_angle = None
    objects = []
    group_pts = []
    line_segments = []
    stars = []
    obj_points = []
    big_cnts = []
    count = 0
    x1,y1,w1,h1 = points[index]
    print ("Total Points found in image: ", len(points))
    # pre-seed an "unused" flag for every ordered pair of points
    # NOTE(review): range(0, len(points)-1) skips the last point, and the
    # inner loop reuses the index name `i`; preserved as-is.
    used_pts = {}
    for i in range(0,len(points)-1):
        x1,y1,w1,h1 = points[i]
        for i in range(0,len(points)-1):
            x2,y2,w2,h2 = points[i]
            key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
            used_pts[key] = 0
            key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
            used_pts[key2] = 0
    possible_stars = []
    for i in range(0,len(points)-1):
        closest = []
        x1,y1,w1,h1 = points[i]
        for j in range(0,len(points)-1):
            x2,y2,w2,h2 = points[j]
            key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
            key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
            dist = calc_dist(x1,y1,x2,y2)
            angle = find_angle(x1,y1,x2,y2)
            if x1 != x2 and y1 != y2:
                if used_pts[key] == 0 and used_pts[key2] == 0 :
                    #print("Closest Point:", (int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
                    closest.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
                    used_pts[key] = 1
                    used_pts[key2] = 1
                    #print("Key has been used:", key, key2)
                #else:
                #    print("Key already used try another one:", key, key2)
            #else:
            #    print ("this point has already been used")
            count = count + 1
        # of all the close points, make sure that at least 2 points < 25 px dist exist.
        conf_closest = []
        for cls in closest:
            if cls[0] < 100:
                conf_closest.append(cls)
        if len(closest) > 0:
            distsort = np.unique(closest, axis=0)
            dist,angle,x1,y1,x2,y2 = distsort[0]
            if dist < 50 and len(conf_closest) > 1:
                line_segments.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
                obj_points.append((int(x1),int(y1), int(w1), int(h1)))
            else:
                possible_stars.append((int(x1),int(y1),int(w1),int(h1)))
            #print("CLOSEST LINE SEGMENT FOR PT: ", distsort[0])
        #else:
        #    print("ERROR! no close points to this one!", x1,y1)
        if w1 > 15 or h1 > 15:
            # print ("BIG!!! We have a big object here likely containing many line segments.")
            big_cnts.append((int(x1),int(y1),int(w1),int(h1)))
    # a candidate star sitting near a line segment is adopted as an object point
    for star in possible_stars:
        close = 0
        for line in line_segments:
            dist,angle,x1,y1,x2,y2 = line
            star_dist = calc_dist(star[0], star[1], x1,y1)
            #print ("STARDIST: ", star_dist, star[0], star[1], x1,y1)
            if star_dist < 60:
                close = 1
        if close == 1:
            obj_points.append(star)
        else:
            stars.append(star)
    #print ("OBJECT POINTS")
    if len(line_segments) > 0:
        sorted_lines = sorted(line_segments, key=lambda x: x[2])
    else:
        sorted_lines = []
    #print ("LINE SEGMENTS:")
    #for line in sorted_lines:
    #    print (line)
    last_ang = 0
    last_dist = 0
    line_groups = []
    line_group = []
    orphan_lines = []
    # chain segments whose angles agree within 5 degrees into groups
    if len(sorted_lines) > 0:
        for segment in sorted_lines:
            dist,angle,x1,y1,x2,y2 = segment
            if last_ang != 0 and (angle -5 < last_ang < angle + 5) and dist < 100:
                #print ("Line Segment Part of Existing Group: ", segment)
                line_group.append((dist,angle,x1,y1,x2,y2))
            else:
                #print ("New Group Started!", last_ang, angle )
                #print ("Line Segment Part of New Group: ", segment)
                if len(line_group) >= 3:
                    line_groups.append(line_group)
                else:
                    #print("Last line segment was too small to be part of a group! These are random points or stars. Skip for now.")
                    for line in line_group:
                        orphan_lines.append(line)
                line_group = []
                line_group.append((dist,angle,x1,y1,x2,y2))
            last_ang = angle
        # flush the final group (only 2 members required here)
        if len(line_group) >= 2:
            line_groups.append(line_group)
        else:
            for line in line_group:
                orphan_lines.append(line)
    # now make sure all of the line segments in the line group can connect to at least one of the other segments
    #print ("Total Line Groups as of now:", len(line_groups))
    #print ("Total Orphan Lines as of now:", len(orphan_lines))
    #print ("Confirm the line segments are all part of the same group", len(line_groups))
    #print ("TOTAL POINTS: ", len(points))
    #print ("TOTAL LINE GROUPS: ", len(line_groups))
    #print ("ORPHAN GROUPS: ", len(orphan_lines))
    #for point in points:
    #    print ("POINT: ", point)
    gc = 1
    if len(line_groups) > 0:
        for line_group in line_groups:
            lc = 1
            for line in line_group:
                #print("LINE:", line)
                dist,ang,x1,y1,x2,y2 = line
                #confirm_angle = find_angle(x1,y1,x2,y2)
                #print ("GROUP", gc, lc, line, ang, confirm_angle)
                lc = lc + 1
            gc = gc + 1
    #else:
    # make sure the obj points are not false positives, if so move to stars.
    (line_groups, orphan_lines, stars, obj_points, big_cnts) = conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts)
    return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def conf_objs(line_groups, orphan_lines, stars, obj_points, big_cnts):
    """Validate candidate detections with a three-point straightness test.

    Each line group and the pooled obj_points must have at least three
    roughly collinear points (compute_straight_line <= 1); otherwise the
    obj_points are demoted to stars.  Returns the (possibly modified)
    (line_groups, orphan_lines, stars, obj_points, big_cnts).
    NOTE(review): conf_line_groups is built but line_groups is what gets
    returned, and a failing GROUP (not its lines) is appended to
    orphan_lines, mixing tuple shapes; preserved as-is.
    """
    print ("CONF OBJS")
    print ("LINE GROUPS", len(line_groups))
    print ("OBJ POINTS", len(obj_points))
    conf_line_groups = []
    mx = []
    my = []
    mw = []
    mh = []
    #first lets check the line groups and make sure at least 3 points are straight
    for line_group in line_groups:
        mx = []
        my = []
        mw = []
        mh = []
        lgc = 0
        for dist,ang,x1,y1,x2,y2 in line_group:
            mx.append(x1)
            my.append(y1)
            print (dist, ang, x1,y1,x2,y2)
            print (lgc, "adding MX", x1, mx)
            print (lgc, "adding MYs", y1, my)
            #mx.append(x2)
            #my.append(y2)
            lgc = lgc + 1
        if len(mx) > 2:
            print ("MXs", mx)
            print ("MYs", my)
            st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
        else:
            st = 100
        if st <= 1:
            print ("This group is straight")
            conf_line_groups.append(line_group)
        else:
            print ("This group is NOT straight")
            orphan_lines.append(line_group)
    cc = 0
    mx = []
    my = []
    mw = []
    mh = []
    # pool all obj_points and apply the same straightness test
    for x,y,h,w in obj_points:
        mx.append(x)
        my.append(y)
        mw.append(w)
        mh.append(h)
        cc = cc + 1
    if len(mx) > 2:
        st = compute_straight_line(mx[0],my[0],mx[1],my[1],mx[2],my[2])
    else:
        st = 100
    if st <= 1:
        print ("At least 3 of these are straight, we can continue.", st)
    else:
        print ("These 3 objects are not straight, and thus false!", st)
        # demote every obj point to a star and clear the list
        for x,y,h,w in obj_points:
            stars.append((x,y,h,w))
        obj_points = []
    return(line_groups, orphan_lines, stars, obj_points, big_cnts)
def clean_line_groups(line_groups, orphan_lines):
    """Drop or keep two-segment line groups based on proximity/angle.

    Groups with more than two segments pass through unchanged.  A
    two-segment group is dissolved into orphan_lines when its segments are
    more than 50 px apart or their angles match within 5 degrees
    (NOTE(review): the angle clause looks inverted -- similar angles
    normally argue FOR keeping the pair -- but the condition is preserved
    as written).  Returns (cleaned_line_groups, orphan_lines).
    """
    cleaned_line_groups = []
    for line_group in line_groups:
        if len(line_group) == 2:
            # make sure these two segments are close enough to stay grouped
            (dist, angle, x1, y1, x2, y2) = line_group[0]
            (xdist, xangle, xx1, xy1, xx2, xy2) = line_group[1]
            group_dist = calc_dist(x1, y1, xx1, xy1)
            if group_dist > 50 or (angle - 5 < xangle < angle + 5):
                orphan_lines.append(line_group[0])
                orphan_lines.append(line_group[1])
            else:
                # BUG FIX: the original collected surviving pairs into a
                # local list that was never appended to the result, so every
                # kept two-segment group was silently dropped.
                cleaned_line_groups.append([line_group[0], line_group[1]])
        else:
            cleaned_line_groups.append(line_group)
    print("CLG:", cleaned_line_groups)
    return(cleaned_line_groups, orphan_lines)
def confirm_cnts(crop):
    """Score a candidate detection crop.

    Blurs the crop, thresholds it at half its mean brightness and returns
    the sum of the binary mask -- larger values indicate a brighter blob.
    """
    blurred = cv2.GaussianBlur(crop, (5, 5), 0)
    avg_flux = np.average(blurred)
    max_flux = np.amax(blurred)  # retained from the original (debug value)
    thresh_limit = avg_flux / 2
    _, crop_thresh = cv2.threshold(blurred, thresh_limit, 255, cv2.THRESH_BINARY)
    # (debug contour/imshow code from the original intentionally omitted --
    # it was fully commented out and had no runtime effect)
    return(np.sum(crop_thresh))
def find_best_thresh(image, thresh_limit, type):
    """Raise the binary threshold until the contour set looks sane.

    Increments *thresh_limit* until cv2.findContours yields no more than a
    cap of contours (80 for type 0, else 100) and, in type-0 mode, no
    contour is image-wide or larger than 10 px.  Returns the final limit.
    NOTE(review): the parameter name `type` shadows the builtin; the cv2
    3-value findContours return implies OpenCV 3.x here.
    """
    go = 1
    while go == 1:
        _, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
        (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if type == 0:
            cap = 80
        else:
            cap = 100
        if len(cnts) > cap:
            thresh_limit = thresh_limit + 1
        else:
            bad = 0
            for (i,c) in enumerate(cnts):
                x,y,w,h = cv2.boundingRect(cnts[i])
                # a contour spanning the full image width means the
                # threshold is still too low
                if w == image.shape[1]:
                    bad = 1
                if type == 0 and (w >= 10 or h > 10):
                    bad = 1
            if bad == 0:
                go = 0
            else:
                thresh_limit = thresh_limit + 1
    #print ("CNTs, BEST THRESH:", str(len(cnts)), thresh_limit)
    return(thresh_limit)
def find_objects2(timage, tag, current_image, filename):
    """Detect candidate meteor tracks in a thresholded stack image.

    Thresholds *timage*, extracts contour points (expanding big contours via
    find_points_in_crop), groups them into line segments/groups through
    find_objects() and regroup_lines(), clusters the points with
    kmeans_cluster(), and annotates *image*/*current_image* with cv2
    drawing calls.  Returns (line_groups, points, clusters).
    """
    stars = []
    big_cnts = []
    obj_points = []
    image = timage
    thresh_limit = 10
    thresh_limit = find_best_thresh(image, thresh_limit, 0)
    # find best thresh limit code here!
    line_objects = []
    points = []
    orphan_lines = []
    _, thresh = cv2.threshold(image, thresh_limit, 255, cv2.THRESH_BINARY)
    (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #print ("CNTS:", len(cnts))
    hit = 0
    objects = []
    if len(cnts) < 500:
        for (i,c) in enumerate(cnts):
            x,y,w,h = cv2.boundingRect(cnts[i])
            if w > 1 and h > 1:
                if (w < 10 and h <10):
                    nothing = 0
                    # cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
                    #cv2.circle(image, (x,y), 20, (120), 1)
                    #if w != h:
                    #    cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
                else:
                    #cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
                    # Convert big object into points and add each one to the points array.
                    crop = timage[y:y+h,x:x+w]
                    points.append((x,y,w,h))
                    if w < 600 and h < 400:
                        crop_points = find_points_in_crop(crop,x,y,w,h)
                        for x,y,w,h in crop_points:
                            print("adding some points",x,y,w,h)
                            points.append((x,y,w,h))
                points.append((x,y,w,h))
                #objects.append((x,y,w,h))
            #else:
            #    image[y:y+h,x:x+w] = [0]
    else:
        print ("WAY TO MANY CNTS:", len(cnts))
        thresh_limit = thresh_limit + 5
        return(points)
    # find line objects
    if (len(objects) + len(points)) > 0:
        # BUG FIX: find_objects returns a 5-tuple (including big_cnts); the
        # original 4-name unpack raised ValueError whenever points existed.
        line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, points)
    else:
        line_groups = []
    final_group = []
    final_groups = []
    reject_group = []
    reject_groups = []
    line_segments = flatten_line_groups(line_groups)
    line_segments = sorted(line_segments, key = lambda x: (x[0],x[1]))
    if len(line_segments) > 0:
        final_group, reject_group = regroup_lines(line_segments)
        print ("MIKE!:", len(final_group))
        if len(final_group) > 1:
            final_groups.append(final_group)
        else:
            for line in final_group:
                orphan_lines.append(line)
        if len(reject_group) > 3:
            print (len(reject_group), "rejects left. do it again.")
            reject_group = sorted(reject_group, key = lambda x: (x[1],x[0]))
            final_group, reject_group = regroup_lines(reject_group)
            if len(final_group) > 1:
                final_groups.append(final_group)
            else:
                for line in final_group:
                    orphan_lines.append(line)
            print (len(reject_group), "rejects left after 2nd try")
        if len(reject_group) > 3:
            print (len(reject_group), "rejects left. do it again.")
            final_group, reject_group = regroup_lines(reject_group)
            if len(final_group) > 1:
                final_groups.append(final_group)
            else:
                for line in final_group:
                    orphan_lines.append(line)
            print (len(reject_group), "rejects left after 3rd try")
    # try to adopt the orphans!
    if len(orphan_lines) >= 1:
        print (orphan_lines)
        final_group, reject_group = regroup_lines(orphan_lines)
        if len(final_group) > 1:
            final_groups.append(final_group)
        if len(final_group) > 0:
            print ("Adopted! : ", final_group)
        orphan_lines = reject_group
    if len(orphan_lines) >= 1:
        final_group, reject_group = regroup_lines(reject_group)
        if len(final_group) > 1:
            final_groups.append(final_group)
        if len(final_group) > 0:
            print ("Adopted! : ", final_group)
        orphan_lines = reject_group
    if len(orphan_lines) >= 1:
        final_group, reject_group = regroup_lines(reject_group)
        if len(final_group) > 1:
            final_groups.append(final_group)
        if len(final_group) > 0:
            print ("Adopted! : ", final_group)
        orphan_lines = reject_group
    final_groups, orphan_lines = clean_line_groups(final_groups, orphan_lines)
    clusters= []
    clusters_ab= []
    last_x = None
    last_y = None
    last_ang = None
    ang = None
    if len(points) > 3:
        num_clusters = int(len(points)/3)
        clusters = kmeans_cluster(points, num_clusters)
        #print ("MIKE CLUSTERS", len(clusters))
        for cluster in clusters:
            cxs = []
            cys = []
            for cp in cluster:
                x,y,w,h = cp
                cxs.append(x)
                cys.append(y)
                # connect consecutive cluster points whose angles agree
                if last_x is not None:
                    ang = find_angle(x,y,last_x,last_y)
                    print ("CLUSTER ANGLE:", x,y,last_x,last_y,ang)
                    if last_ang is not None:
                        if ang - 5 < last_ang < ang + 5:
                            cv2.line(image, (x,y), (last_x,last_y), (200), 4)
                last_x = x
                last_y = y
                last_ang = ang
            a, b = best_fit (cxs,cys)
            mnx = min(cxs)
            mny = min(cys)
            mmx = max(cxs)
            mmy = max(cys)
            cv2.rectangle(image, (mnx,mny), (mmx, mmy), (255),1)
            #print ("MIKE MIKE XS,", cxs)
            #print ("MIKE MIKE YS,", cys)
            clusters_ab.append((a,b))
            print ("MIKE AB,", a,b)
    print ("FINAL ANALYSIS")
    print (final_groups)
    print ("--------------")
    print ("File Name: ", filename)
    print ("Total Points:", len(points))
    print ("Total Line Segments:", len(line_segments))
    print ("Total Final Line Groups:", len(final_groups))
    print ("Total Clusters:", len(clusters))
    cl =0
    for a,b in clusters_ab:
        print ("Cluster " + str(cl + 1) + " " + str(len(clusters[cl])) + " points")
        print ("LINE AB " + str(a) + " " + str(b))
        cl = cl + 1
    #print (final_groups)
    print ("Total Rejected Lines:", len(reject_group))
    gc = 1
    # BUG FIX: the original `xs = ys = []` bound BOTH names to the same
    # list, so xs and ys always held identical interleaved coordinates.
    xs = []
    ys = []
    for line_group in final_groups:
        lc = 1
        for line in line_group:
            dist,angle,x1,y1,x2,y2 = line
            xs.append(x1)
            xs.append(x2)
            ys.append(y1)
            ys.append(y2)
            #print (gc, lc, line)
            lc = lc + 1
        gc = gc + 1
    if len(xs) > 0 and len(ys) > 0:
        mnx = min(xs)
        mxx = max(xs)
        mny = min(ys)
        mxy = max(ys)
        cv2.rectangle(image, (mnx,mny), (mxx, mxy), (255),1)
    print ("Total Orphaned Lines:", len(orphan_lines))
    if len(line_groups) > 0:
        line_segments = flatten_line_groups(line_groups)
        find_line_nodes(line_segments)
    gc = 1
    # draw every grouped segment plus its labels
    for line_group in line_groups:
        lc = 1
        line_group = sorted(line_group, key = lambda x: (x[2],x[3]))
        dist,angle,sx1,sy1,sx2,sy2 = line_group[0]
        for line in line_group:
            dist,angle,x1,y1,x2,y2 = line
            #s_ang = find_angle(sx1,sy1,x1,y1)
            #if angle - 5 < s_ang < angle + 5:
            #    print("FINAL GROUP:", gc,lc,line, angle, s_ang)
            #    final_group.append((dist,angle,x1,y1,x2,y2))
            #else:
            #    print("REJECT GROUP:", gc,lc,line, angle, s_ang)
            #    reject_group.append((dist,angle,x1,y1,x2,y2))
            #seg_dist = find_closest_segment(line, line_group)
            cv2.line(image, (x1,y1), (x2,y2), (255), 2)
            cv2.putText(image, "L " + str(lc), (x1+25,y1+10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
            lc = lc + 1
        if len(line_group) > 0:
            cv2.putText(image, "LG " + str(gc), (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
        gc = gc + 1
    for line in orphan_lines:
        #print("ORPHAN:", line)
        dist,angle,x1,y1,x2,y2 = line
        cv2.line(image, (x1,y1), (x2,y2), (255), 1)
        cv2.putText(image, "Orph" , (x1+25,y1), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
    #cv2.ellipse(image,(ax,ay),(dist_x,dist_y),elp_ang,elp_ang,180,255,-1)
    #a,b = best_fit(lxs, lys)
    #plt.scatter(lxs,lys)
    #plt.xlim(0,640)
    #plt.ylim(0,480)
    #yfit = [a + b * xi for xi in lxs]
    #plt.plot(lxs,yfit)
    #cv2.imshow('pepe', image)
    #cv2.waitKey(1)
    #plt.gca().invert_yaxis()
    #plt.show()
    #for x,y,w,h in points:
    #    if w > 25 or h > 25:
    #        cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
    #    else:
    #        cv2.circle(image, (x,y), 20, (120), 1)
    edges = cv2.Canny(image.copy(),thresh_limit,255)
    el = filename.split("/");
    fn = el[-1]
    cv2.putText(current_image, "File Name: " + fn, (10,440), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
    cv2.putText(current_image, str(tag), (10,450), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
    cv2.putText(current_image, "Points: " + str(len(points)), (10,460), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
    cv2.putText(current_image, "Line Groups: " + str(len(final_groups)), (10,470), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
    blend = cv2.addWeighted(image, .2, current_image, .8,0)
    # read back the cluster scatter plot written by kmeans_cluster()
    np_plt = cv2.imread("/tmp/plot.png")
    np_plt = cv2.cvtColor(np_plt, cv2.COLOR_BGR2GRAY)
    hh, ww = np_plt.shape
    crop = cv2.resize(np_plt, (0,0), fx=1.1, fy=1.1)
    crop = crop_center(crop, 640,480)
    #blend = cv2.addWeighted(blend, .5, crop, .5,0)
    #for x,y in stars:
    #    cv2.circle(blend, (x,y), 5, (255), 1)
    #exit()
    return(line_groups, points, clusters)
def regroup_lines(line_segments):
    """Split line segments into one coherent group plus rejects.

    Pass 1 computes each segment's angle relative to the first segment's
    start point, accepts those within 10 degrees of the median of such
    angles, and rejects the rest.  Pass 2 additionally adopts segments
    whose own angle matches the first segment's within 10 degrees AND whose
    start point is within 20 px of it.  Duplicate start points are not
    re-added.  Returns (final_group, reject_group).
    """
    final_group = []
    reject_group = []
    sangles = []
    dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
    for line in line_segments:
        dist,angle,x1,y1,x2,y2 = line
        s_ang = find_angle(sx1,sy1,x1,y1)
        sangles.append(s_ang)
    mean_angle = np.median(np.array(sangles))
    if len(line_segments ) > 0:
        dist,angle,sx1,sy1,sx2,sy2 = line_segments[0]
        for line in line_segments:
            dist,angle,x1,y1,x2,y2 = line
            s_ang = find_angle(sx1,sy1,x1,y1)
            if mean_angle - 10 <= s_ang <= mean_angle + 10:
                #print("FINAL GROUP:", line, angle, s_ang, mean_angle)
                # skip segments whose start point is already in the group
                found = 0
                for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
                    if ax1 == x1 and ay1 == y1:
                        found = 1
                if found == 0:
                    final_group.append((dist,angle,x1,y1,x2,y2))
            else:
                #print("REJECT GROUP:",line, angle, s_ang, mean_angle)
                reject_group.append((dist,angle,x1,y1,x2,y2))
    if len(line_segments ) > 0:
        sdist,sangle,sx1,sy1,sx2,sy2 = line_segments[0]
        for line in line_segments:
            dist,angle,x1,y1,x2,y2 = line
            s_ang = find_angle(sx1,sy1,x1,y1)
            tdist = calc_dist(x1,y1,sx1,sy1)
            if sangle - 10 <= angle <= sangle + 10 and tdist < 20:
                found = 0
                for (dd,aa,ax1,ay1,ax2,ay2) in final_group:
                    if ax1 == x1 and ay1 == y1:
                        found = 1
                if found == 0:
                    print("FINAL GROUP:", line, angle, s_ang, mean_angle)
                    final_group.append((dist,angle,x1,y1,x2,y2))
            else:
                #print("REJECT GROUP:",line, angle, s_ang, mean_angle)
                reject_group.append((dist,angle,x1,y1,x2,y2))
    return(final_group, reject_group)
def flatten_line_groups(line_groups):
    """Flatten grouped line segments into a single flat list of 6-tuples."""
    return [(dist, angle, x1, y1, x2, y2)
            for group in line_groups
            for dist, angle, x1, y1, x2, y2 in group]
def log_node(nodes, line, closest):
    """Seed an empty node list with a (line, closest) pairing.

    Only appends when *nodes* is empty (incomplete helper -- its only call
    site is commented out in find_line_nodes).  Returns *nodes*.
    """
    if len(nodes) == 0:
        nodes.append((line, closest))
    # BUG FIX: the original only returned on the empty-list path, yielding
    # None for a non-empty list; always return the node list.
    return nodes
def find_line_nodes(line_segments):
    """Walk each segment, splitting the remaining segments via sort_segs.

    The closest/rest results are currently discarded (the log_node hookup
    below was never finished), so this is a work-in-progress helper with no
    observable effect beyond calling sort_segs.
    """
    nodes = []
    remaining = line_segments
    for segment in line_segments:
        #print("LENLINE", len(segment))
        #print(segment)
        dist, angle, x1, y1, x2, y2 = segment
        closest, remaining = sort_segs(x1, y1, remaining)
        #nodes = log_node(nodes, segment, closest)
def sort_segs(x,y,seg_dist):
    """Split segments into the single closest one and the rest.

    Sorts the segments by their first element, keeps the first one whose
    start point is not exactly at (x, y) as the lone "closest" entry, and
    routes everything else to *rest*.  Returns (closest, rest).
    NOTE(review): if a malformed (non-6-tuple) entry appears, the unpacked
    names may be stale from a previous iteration; behavior preserved.
    """
    sorted_lines = sorted(seg_dist, key=lambda seg: seg[0])
    #for line in sorted_lines:
    #    print ("SORTED LINE", line)
    closest = []
    rest = []
    already_found = 0
    for line in sorted_lines:
        if len(line) == 6:
            dist,angle,x1,y1,x2,y2 = line
        else:
            print("WTF!:", line)
        seg_dist = calc_dist(x,y,x1,y1)
        if seg_dist != 0 and already_found != 1:
            closest.append((dist,angle,x1,y1,x2,y2))
            # BUG FIX: already_found was never set, so EVERY non-zero-distance
            # segment landed in "closest"; mark the first match so the
            # remainder goes to rest, as the names intend.
            already_found = 1
        else:
            rest.append((dist,angle,x1,y1,x2,y2))
    return(closest, rest)
def find_closest_segment(this_line,line_group):
    """Rank segments in *line_group* by midpoint distance to *this_line*.

    Skips zero-distance entries (the segment itself) and returns the list
    of distance tuples sorted nearest-first.
    NOTE(review): each tuple carries this_line's own endpoints, not the
    compared segment's -- likely an oversight, preserved as-is.
    """
    seg_dist = []
    dist, angle, x1,y1,x2,y2 = this_line
    cx = (x1 + x2) / 2
    cy = (y1 + y2) / 2
    for line in line_group:
        xdist, xangle, xx1,xy1,xx2,xy2 = line
        xcx = (xx1 + xx2) / 2
        xcy = (xy1 + xy2) / 2
        dist = calc_dist(cx,cy,xcx,xcy)
        if dist > 0:
            seg_dist.append((dist, x1,y1,x2,y2))
    sorted_lines = sorted(seg_dist, key=lambda s: s[0])
    #for line in sorted_lines:
    #    print("CLOSEST SEGMENTS:", line)
    # BUG FIX: the sorted ranking was computed and discarded (implicit None
    # return); return it so callers can use the result.
    return sorted_lines
def find_points_in_crop(crop,x,y,w,h):
    """Break a large bright contour into smaller point boxes.

    Pastes the crop onto a blank 640x480 canvas, blanks every 5th
    pixel row/column to slice the blob into fragments, then lowers the
    binary threshold from 250 until enough (size-scaled cnt_limit)
    small contours are found.  Returns a list of (x, y, w, h) boxes.
    NOTE(review): the inner loop bound is range(y, y+w) -- presumably
    y+h was intended; and cv2.findContours' 3-value return implies
    OpenCV 3.x.  Behavior preserved.
    """
    print ("cropping")
    go = 1
    cnt_pts = []
    thresh_limit = 250
    canvas = np.zeros([480,640], dtype=crop.dtype)
    canvas[y:y+h,x:x+w] = crop
    # carve a 5-px grid of black lines through the pasted region
    for i in range(x,x+w):
        for j in range(y,y+w):
            if i % 5 == 0:
                canvas[0:480,i:i+3] = 0
            if j % 5 == 0:
                canvas[j:j+3,0:640] = 0
    #print ("CROP", crop.shape[0])
    #if crop.shape[0] > 25:
    #cv2.imshow('pepe', canvas)
    #cv2.waitKey(1000)
    last_cnts = []
    while go == 1:
        _, thresh = cv2.threshold(canvas, thresh_limit, 255, cv2.THRESH_BINARY)
        (_, cnts, xx) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # required contour count scales with the blob size, clamped to [5, 25]
        cnt_limit = int((w + h) / 20)
        if cnt_limit < 5:
            cnt_limit = 5
        if cnt_limit > 25:
            cnt_limit = 25
        #print ("CNTS at thresh:", len(cnts), thresh_limit)
        thresh_limit = thresh_limit - 2
        if len(cnts) >= cnt_limit:
            for (i,c) in enumerate(cnts):
                x,y,w,h = cv2.boundingRect(cnts[i])
                if w > 1 and h > 1:
                    cnt_pts.append((x,y,w,h))
            # stop once lowering the threshold no longer yields new points
            if len(last_cnts) >= len(cnt_pts) and len(last_cnts) > cnt_limit:
                #cnt_pts = last_cnts
                go = 0
        if thresh_limit < 5:
            cnt_pts = last_cnts
            go = 0
        if len(cnts) > 70:
            go = 0
        #print ("CNTS: ", len(cnts))
        #print ("LAST CNTS: ", len(last_cnts))
        #print ("THRESH LIMIT: ", thresh_limit)
        #cv2.imshow('pepe', thresh)
        #cv2.waitKey(100)
        last_cnts = cnt_pts
    return(cnt_pts)
def best_fit(X, Y):
    """Least-squares fit of y = a + b*x; prints and returns (a, b)."""
    n = len(X)  # or len(Y)
    mean_x = sum(X) / n
    mean_y = sum(Y) / len(Y)
    # Standard closed-form simple linear regression.
    numer = sum(xi * yi for xi, yi in zip(X, Y)) - n * mean_x * mean_y
    denom = sum(xi ** 2 for xi in X) - n * mean_x ** 2
    slope = numer / denom
    intercept = mean_y - slope * mean_x
    print('best fit line:\ny = {:.2f} + {:.2f}x'.format(intercept, slope))
    return intercept, slope
def diff_all(med_stack_all, background, median, before_image, current_image, after_image, filename):
    """Blend the current frame 50/50 with its difference from the median frame.

    Returns (blend, current_image, filename).  med_stack_all, background,
    before_image and after_image are accepted for interface compatibility but
    are not used here.
    """
    median = np.uint8(median)
    blur_med = cv2.GaussianBlur(median, (5, 5), 0)
    # Find bright areas in the median (stars / static lights).  The contours
    # are currently unused below, but the pass is kept for parity with the
    # original masking logic.  (An exactly duplicated second
    # find_best_thresh/threshold call whose results were immediately
    # overwritten has been removed.)
    tm = find_best_thresh(blur_med, 30, 1)
    _, median_thresh = cv2.threshold(blur_med, tm, 255, cv2.THRESH_BINARY)
    (_, cnts, xx) = cv2.findContours(median_thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Diff the blurred current frame against the blurred median (blur_med is
    # reused from above -- median has not changed in between), then blend the
    # diff back over the current frame.
    blur_cur = cv2.GaussianBlur(current_image, (5, 5), 0)
    cur_med_diff = cv2.absdiff(blur_cur.astype(blur_cur.dtype), blur_med,)
    blend = cv2.addWeighted(current_image, .5, cur_med_diff, .5, 0)
    return (blend, current_image, filename)
def inspect_image(med_stack_all, background, median, before_image, current_image, after_image, avg_cnt, avg_tot, avg_pts, filename):
    """Run the full object-detection pass on one frame and report results.

    The frame is background-subtracted, blended against the median via
    diff_all, and its points are grouped into lines / stars / objects with
    up to two find_objects passes.  Big contours are outlined on the blend.
    Returns (blend, points, line_groups, stars, obj_points, big_cnts).
    """
    big_cnts = []
    line_groups = []
    orphan_lines = []
    obj_points = []
    stars = []
    # Work on the background-subtracted frame.
    image_diff = cv2.absdiff(current_image.astype(current_image.dtype), background,)
    current_image = image_diff
    blend, current_image, filename = diff_all(None, background, median, before_image, current_image, after_image, filename)
    points = find_objects2(blend, "Current Median Diff Blend", current_image, filename)
    if len(points) > 2:
        line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, points)
        # Second pass over the object points refines the groupings and may
        # find additional stars.
        if len(obj_points) > 2:
            line_groups, orphan_lines, stars2, obj_points, big_cnts = find_objects(0, obj_points)
            stars = stars + stars2
    print("---FINAL ANALYSIS---")
    print("File: ", filename)
    print("Total Points: ", len(points))
    print("Line Groups: ", len(line_groups))
    lg_points = 0
    lg = 1
    for line in line_groups:
        print(" Group " + str(lg) + ": " + str(len(line)))
        lg = lg + 1
        lg_points = lg_points + len(line)
    print("Total Line Group Points: ", lg_points)
    # BUG FIX: this line previously reported len(line_groups) under the
    # "Orphan Lines" label.
    print("Orphan Lines: ", len(orphan_lines))
    print("Stars: ", len(stars))
    print("Obj Points: ", len(obj_points))
    print("Big CNTS: ", len(big_cnts))
    for x, y, w, h in big_cnts:
        cv2.rectangle(blend, (x, y), (x+w+5, y+h+5), (255), 1)
    return (blend, points, line_groups, stars, obj_points, big_cnts)
def parse_file_date(orig_video_file):
    """Extract the capture timestamp encoded in a capture file's name.

    File names look like ".../YYYY_MM_DD_HH_MM_SS_camN_type.ext"; after
    normalizing "_" and "." to "-" the name splits into exactly nine fields.
    Returns the capture date as a "YYYY-MM-DD HH:MM:SS" string.

    (Dead locals that derived stack/report/trim filenames from the input but
    were never used have been removed.)
    """
    el = orig_video_file.split("/")
    file_name = el[-1]
    # Normalize every separator to "-" so one split yields all fields.
    file_name = file_name.replace("_", "-")
    file_name = file_name.replace(".", "-")
    xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype, xext = file_name.split("-")
    capture_date = xyear + "-" + xmonth + "-" + xday + " " + xhour + ":" + xmin + ":" + xsec
    return (capture_date)
def day_or_night(config, capture_date):
    """Classify a capture timestamp as 'day' or 'night' for this station.

    Builds an ephem observer at the configured device lat/lng and computes
    the sun's position at capture_date; a whole-degree altitude of -4 or
    lower counts as night.  Returns (sun_status, sun_alt) where sun_alt is
    the whole-degree altitude as a string.
    """
    observer = ephem.Observer()
    observer.pressure = 0
    observer.horizon = '-0:34'
    observer.lat = config['device_lat']
    observer.lon = config['device_lng']
    observer.date = capture_date
    sun = ephem.Sun()
    sun.compute(observer)
    # ephem angles stringify as "deg:min:sec"; keep only the degree field.
    sun_alt, _m, _s = str(sun.alt).split(":")
    sun_az, _m, _s = str(sun.az).split(":")
    sun_status = "night" if int(sun_alt) <= -4 else "day"
    return (sun_status, sun_alt)
def find_closest_cnts(sx, sy, cnts):
    """Count contours whose bounding-box origin lies 5-30 px from (sx, sy)."""
    neighbors = 0
    for contour in cnts:
        bx, by, bw, bh = cv2.boundingRect(contour)
        if 5 < calc_dist(sx, sy, bx, by) < 30:
            neighbors += 1
    return (neighbors)
def sort_cnts(cnts, method="left-to-right"):
    """Sort contours by bounding-box position.

    method is one of "left-to-right", "right-to-left", "top-to-bottom",
    "bottom-to-top".  Returns (cnts, boundingBoxes) as parallel tuples.
    """
    # Reverse order for the two "backwards" directions; sort on y for the
    # vertical directions, on x otherwise.
    reverse = method in ("right-to-left", "bottom-to-top")
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    ordered = sorted(zip(cnts, boundingBoxes),
                     key=lambda pair: pair[1][axis], reverse=reverse)
    (cnts, boundingBoxes) = zip(*ordered)
    return (cnts, boundingBoxes)
def find_noisy_cnts(image):
    """Erase noise and diffuse shapes (clouds/planes) from *image* in place.

    Single-pixel specks are counted as noise and blacked out; non-convex
    contours with many vertices or a long perimeter are blacked out too
    (with a 10-px margin in the perimeter case).  Returns (noise, image)
    where noise is the number of erased specks/big shapes.
    """
    #cv2.imshow('pepe', image)
    #cv2.waitKey(1000)
    noise = 0
    (_, noisy_cnts, xx) = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for (i,c) in enumerate(noisy_cnts):
        #x,y,w,h = cv2.boundingRect(noisy_cnts[i])
        x,y,w,h = cv2.boundingRect(c)
        convex = cv2.isContourConvex(c)
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.01 * peri, True)
        #print ("NOISE:", len(noisy_cnts), int(peri), len(approx), convex)
        # 1-px speck: count and erase.
        if w <= 1 and h <= 1:
            noise = noise + 1
            image[y:y+h,x:x+w] = [0]
        # Many-vertex, non-convex shape => diffuse object; erase its box.
        if len(approx) > 10 and convex is False :
            # this is a cloud or a plane, so mute it out.
            #print ("DELETE!")
            image[y:y+h,x:x+w] = [0]
        # Long non-convex perimeter: erase with a 10-px margin, clamped to
        # the image's top-left edges.
        if peri > 100 and convex is False :
            # this is a cloud or a plane, so mute it out.
            ny = y - 10
            nx = x - 10
            if ny < 0:
                ny = 0
            if nx < 0:
                nx = 0
            image[ny:y+h+10,nx:x+w+10] = [0]
            noise = noise + 1
            #print ("removing big cnt:", x,y,w,h,convex,peri,len(approx))
    return(noise, image)
def find_cnts(image):
    """Score how 'meteor-like' the contours in a thresholded frame are.

    Two passes over the (up to 25 largest) contours: the first screens for
    diffuse non-convex shapes, the second walks the contours left-to-right
    comparing angle/distance between consecutive bounding boxes and drawing
    debug annotations onto *image*.  Returns (good, good_angles, image).
    """
    last_angle = -1000
    angle = 0
    dist = 0
    (_, cnts_by_area, xx) = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts_by_area = sorted(cnts_by_area, key = cv2.contourArea, reverse = True)[:25]
    print ("CNT BY AREA LENGTH: ", len(cnts_by_area))
    # Mute out the big non-convex cnts
    for (i,c) in enumerate(cnts_by_area):
        x,y,w,h = cv2.boundingRect(cnts_by_area[i])
        #cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (255),1)
        #cv2.imshow('pepe', image)
        #cv2.waitKey(1000)
        #print ("Mute: ", w,h)
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        convex = cv2.isContourConvex(c)
        close = find_closest_cnts(x,y,cnts_by_area)
        if peri > 100 and convex == False or (close <= 2):
            # This is a cloud or plane some other non meteor object
            #print ("BAD CNT!!! MUTE IT!", peri, convex)
            # NOTE(review): the actual mute (image[...] = [0]) is commented
            # out below, so this branch currently has no effect.
            ny = y - 10
            nx = x - 10
            if ny < 0:
                ny = 0
            if nx < 0:
                nx = 0
            #image[ny:y+h+10,nx:x+w+10] = [0]
    color = 255
    good = 0
    bad = 0
    good_angles = 0
    # Second pass: re-detect contours and walk them left-to-right, comparing
    # each box's heading/distance against the previous one.
    (_, cnts_by_loc, xx) = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts_by_loc = sorted(cnts_by_loc, key = cv2.contourArea, reverse = True)[:25]
    if len(cnts_by_loc) > 3:
        cnts_by_loc,bb = sort_cnts(cnts_by_loc, method="left-to-right")
    good_points = []
    for (i,c) in enumerate(cnts_by_loc):
        x,y,w,h = cv2.boundingRect(cnts_by_loc[i])
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        convex = cv2.isContourConvex(c)
        # how many cnts are close to this one?
        close = find_closest_cnts(x,y,cnts_by_loc)
        if peri > 100 and convex == False:
            # This is a cloud or plane some other non meteor object
            #image[y-10:y+h+10,x-10:x+w+10] = [0]
            good = good - 1
        # Heading/distance relative to the previous contour (last_x/last_y
        # are set at the bottom of the loop body on every iteration).
        if i > 0:
            last_angle = angle
            angle = find_angle(x,y,last_x,last_y)
            #print ("FIND ANGLE FOR: ", x,y,last_x,last_y, angle, last_angle)
            dist = calc_dist(x,y,last_x,last_y)
        else:
            first_x = x
            first_y = y
        # if this is the last cnt in the group
        if i == len(cnts_by_loc) - 1:
            last_angle = angle
            angle = find_angle(first_x, first_y, x, y)
            dist = calc_dist(first_x, first_y, x, y)
        # if this angle agrees with the last angle
        if (angle - 10 < last_angle < angle + 10) and (1 < close < 10) and (w > 1 and h > 1):
            good_angles = good_angles + 1
            cv2.circle(image, (x,y), 20, (255), 1)
            good_points.append((x,y,w,h))
        # Debug overlay: per-contour metrics in the top-left corner.
        if len(approx) > 0 and peri > 0 and w > 1 and h > 1 and close > 0 and close < 10:
            print ("Peri:", str(i), x,y,w,h, str(int(peri)), str(len(approx)), str(int(angle)), str(int(last_angle)), str(int(dist)), convex, close)
            cv2.putText(image, "PA " + str(i) + " " + str(int(peri)) + " " + str(len(approx)) + " " + str(int(angle)) + " " + str(last_angle) + " " + str(int(dist)) + " " + str(convex) + " " + str(close), (100,10+(i*15)), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
        cv2.putText(image, str(i), (x+10,y+10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
        # Box colour encodes vertex count (then overridden for long perimeters).
        if len(approx) < 3 :
            color = 50
        elif 3 <= len(approx) < 5:
            color = 155
        elif 5 <= len(approx) < 15:
            color = 255
        elif len(approx) > 15:
            color = 75
        if peri > 100:
            color = 75
        if peri > 100 and convex == False:
            # This is a cloud or plane some other non meteor object
            #image[y-10:y+h+10,x-10:x+w+10] = [0]
            good = good - 1
        else:
            good = good + 1
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            image = cv2.drawContours(image, [box], 0,(color),1)
            #if len(approx) >= 5:
            # elp = cv2.fitEllipse(c)
            # image = cv2.ellipse(image,elp,(0,255,0),2)
            #cv2.rectangle(image, (x,y), (x+w+5, y+h+5), (color),1)
        last_x = x
        last_y = y
    # ok we should be left with just 'good cnts'
    # lets run some tests on the 'good cnts'
    # test to make sure they can form a line or share a similar angle
    if len(good_points) >= 3:
        line_groups, orphan_lines, stars, obj_points, big_cnts = find_objects(0, good_points)
        print("Line Groups:", len(line_groups))
        print("Orphan Lines:", len(orphan_lines))
        print("Stars:", len(stars))
        print("Obj Points:", len(obj_points))
    return(good, good_angles, image)
def get_points(image):
    """Extract bounding boxes for up to the 100 largest contours in *image*.

    Returns (points, xypoints): points holds one
    (x, y, w, h, peri, approx, convex) tuple per kept contour, xypoints just
    the (x, y) origins.  Nothing is collected unless more than two contours
    are present.
    """
    points = []
    xypoints = []
    (_, cnt_points, xx) = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(cnt_points) > 2:
        cnt_points = sorted(cnt_points, key = cv2.contourArea, reverse = True)[:100]
        # BUG FIX: the left-to-right sort result used to be assigned to a
        # misspelled name ('cnts_points') and silently discarded; the points
        # are now actually returned in left-to-right order.
        cnt_points, bb = sort_cnts(cnt_points, method="left-to-right")
        for (i, c) in enumerate(cnt_points):
            x, y, w, h = cv2.boundingRect(c)
            # NOTE(review): 'y > 1' looks like it was meant to be 'w > 1';
            # kept as-is pending confirmation against the callers.
            if y > 1 or h > 1:
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                convex = cv2.isContourConvex(c)
                points.append((x, y, w, h, peri, approx, convex))
                xypoints.append((x, y))
    return (points, xypoints)
def find_orphan_points(points):
    """Index every point and attach its nearest usable neighbours.

    *points* is a list of (x, y, w, h, peri, approx, convex) tuples.
    Returns a dict keyed by "x:y" holding per-point metadata plus
    'closest_points', a distance-sorted list of (x, y, dist, angle)
    neighbours within (2, 50) px.  Each point's single nearest neighbour is
    marked 'used' so it is not re-paired by later points.
    """
    closest_points = []
    all_points = {}
    # First pass: index each point by its "x:y" key.
    for (x, y, w, h, peri, approx, convex) in points:
        key = str(x) + ":" + str(y)
        all_points[key] = {
            'x': x,
            'y': y,
            'w': w,
            'h': h,
            'used': 0,
            'peri': peri,
            'approx': len(approx),
            'convex': convex,
        }
    # Second pass: for every point still unused, gather its close neighbours.
    for (vx, vy, vw, vh, vperi, vapprox, vconvex) in points:
        idx = str(vx) + ":" + str(vy)
        point_index = all_points[idx]
        for (x2, y2, w2, h2, peri2, approx2, convex2) in points:
            dist = int(calc_dist(vx, vy, x2, y2))
            angle = int(find_angle(vx, vy, x2, y2))
            # BUG FIX: the size test previously read stale w/h left over from
            # the first loop; it now checks this point's own vw/vh.  (A dead
            # idx2/point_index2 lookup inside this loop was also removed.)
            if dist > 2 and dist < 50 and point_index['used'] == 0 and vw > 1 and vh > 1 and w2 > 1 and h2 > 1:
                closest_points.append((x2, y2, dist, angle))
        point_index['closest_points'] = sorted(closest_points, key=lambda cp: cp[2])
        # find #1 closest and block it from re-use
        point_index['used'] = 1
        all_points[idx] = point_index
        if len(closest_points) > 0:
            cx, cy, cd, ca = point_index['closest_points'][0]
            idx2 = str(cx) + ":" + str(cy)
            all_points[idx2]['used'] = 1
        closest_points = []
    print("POINTS IN THIS IMAGE:", len(all_points))
    return (all_points)
def cluster_size(cluster):
    """Return (width, height) of the axis-aligned bounding box of a cluster
    of (x, y) points."""
    pts = np.array(cluster)
    xs = pts[:, 0]
    ys = pts[:, 1]
    return (xs.max() - xs.min(), ys.max() - ys.min())
def diff_stills(sdate, cam_num):
    """Interactively review one night's "*-diff.jpg" frames for a camera.

    Loads every nighttime diff image under the day's proc directory,
    extracts bright points, k-means clusters them, chains points whose
    headings agree, and shows the annotated frame in the 'pepe' OpenCV
    window.  Space advances past a frame with clusters; ESC exits.
    """
    file_list = []
    image_thresh = []
    med_last_objects = []
    last_objects = deque(maxlen=5)
    diffed_files = []
    config = read_config("conf/config-1.txt")
    video_dir = "/mnt/ams2/SD/"
    images = []
    images_orig = []
    images_blend = []
    images_info = []
    count = 0
    last_image = None
    last_thresh_sum = 0
    hits = 0
    avg_cnt = 0
    avg_tot = 0
    avg_pts = 0
    count = 0
    cv2.namedWindow('pepe')
    glob_dir = video_dir + "proc/" + sdate + "/" + "*cam" + str(cam_num) + "-diff.jpg"
    report_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-report.txt"
    master_stack_file = video_dir + "proc/" + sdate + "/" + sdate + "-cam" + str(cam_num) + "-master_stack.jpg"
    # Keep only frames captured at night (sun below -4 degrees).
    for filename in (glob.glob(glob_dir)):
        capture_date = parse_file_date(filename)
        sun_status, sun_alt = day_or_night(config, capture_date)
        if sun_status != 'day' and int(sun_alt) < -4:
            #print("NIGHTTIME", capture_date, filename, sun_status)
            file_list.append(filename)
        else:
            print ("This is a daytime or dusk file", filename)
    sorted_list = sorted(file_list)
    print ("Loading Images...")
    for filename in sorted_list:
        open_cv_image = cv2.imread(filename,0)
        orig_filename = filename.replace("diff", "stacked")
        orig_image = cv2.imread(orig_filename,0)
        # Black out the timestamp banner along the bottom of the frame.
        open_cv_image[440:480, 0:640] = [0]
        images_orig.append(orig_image)
        images.append(open_cv_image)
    images_with_points = 0
    count = 0
    for image in images:
        print ("---------START IMAGE----------")
        points,xypoints = get_points(image)
        #print ("POINTS:", len(points))
        clusters = []
        # K-means cluster the points: roughly 3 points per cluster,
        # clamped to between 2 and 10 clusters.
        if len(points) > 2:
            num_c = int(len(points) / 3)
            if num_c < 2:
                num_c = 2
            if num_c > 10:
                num_c = 10
            clusters,cluster_d = kmeans_cluster2(xypoints, num_c)
        if len(clusters) > 2:
            clusters = sorted(clusters, key=lambda x: x[0])
        #print ("CLUSTERS: ", clusters)
        # first lets get rid of anything where there is only 1 cluster and the
        # the cluster's max size is < 25x25 px
        if len(clusters) == 1:
            cw,ch = cluster_size(clusters[0])
            if cw < 25 or ch < 25:
                print ("Cluster rejected too small.", cw, ch)
                clusters = []
            else :
                print ("Cluster w,h.", cw, ch)
        # next lets get rid of anything that has less than 4 points and 2 clusters
        if len(points) <= 4 and len(clusters) <= 2:
            print ("Clusters rejected not enough points.", len(points))
            clusters = []
        cn = 0
        # Draw each surviving cluster: best-fit line, label and bounding box.
        for cluster in clusters:
            np_cluster = np.array(cluster)
            min_x = np.min(np_cluster[:,0])
            min_y = np.min(np_cluster[:,1])
            max_x = np.max(np_cluster[:,0])
            max_y = np.max(np_cluster[:,1])
            par = np.polyfit(np_cluster[:,0], np_cluster[:,1], 1, full=True)
            slope = par[0][0]
            intercept = par[0][1]
            x1 = [min(np_cluster[:,0]), max(np_cluster[:,0])]
            y1 = [int(slope*xx + intercept) for xx in x1]
            cw,ch = cluster_size(cluster)
            print ("Slope, Intercept", slope, intercept)
            print ("Cluster W,H", cw, ch)
            cv2.line(image, (x1[0],y1[0]), (x1[1],y1[1]), (200), 1)
            cv2.putText(image, "Cluster " + str(cn), (min_x,min_y), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
            cv2.rectangle(image, (min_x,min_y), (max_x, max_y), (255),1)
            cn = cn + 1
        if len(points) > 2:
            images_with_points = images_with_points + 1
            all_points = find_orphan_points(points)
            last_angle = None
        cv2.putText(image, "Total Points:" + str(len(points)), (25,440), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
        cv2.putText(image, "Total Clusters:" + str(len(clusters)), (25,460), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
        # Chain each point to its nearest neighbour when headings agree, to
        # sketch candidate meteor tracks.
        if len(points) > 2:
            count = 0
            gcount = 0
            for idx, value in all_points.items():
                # NOTE(review): good_angles resets to 0 for every point, so
                # the 'good_angles >= 3' check below can never fire; it
                # probably belongs outside this loop -- confirm.
                good_angles = 0
                cpc = 0
                x = value['x']
                y = value['y']
                w = value['w']
                h = value['h']
                #if len(value['closest_points']) > 1:
                # cv2.circle(image, (x+ int(w/2),y + int(h/2)), 7, (100), 1)
                #print ("CLOSEST POINTS: ", value['closest_points'])
                if len(value['closest_points']) > 0:
                    (xx,yy,dd,aa) = value['closest_points'][0]
                    #cv2.putText(image, "Point:" + str(value['x']) + "," + str(value['y']) + " " + str(xx) + "," + str(yy) + " " + str(dd) + " " + str(aa), (360,460 - (gcount*20)), cv2.FONT_HERSHEY_SIMPLEX, .4, (255), 1)
                    # cv2.circle(image, (x+ int(w/2),y + int(h/2)), 15, (255), 1)
                    gcount = gcount + 1
                    if last_angle is None:
                        last_angle = -1000
                    # Link the pair when this heading is within 5 degrees of
                    # the previous one and the points are close enough.
                    if (aa - 5 < last_angle < aa + 5) and dd <= 55:
                        #print ("ANG:", last_angle, aa, dd)
                        #cv2.circle(image, (xx,yy), 20, (200), 1)
                        cv2.line(image, (value['x'],value['y']), (xx,yy), (200), 1)
                        good_angles = good_angles + 1
                    last_angle = aa
                #else:
                # print ("There are no close points to this one this point.")
                count = count + 1
                if good_angles >= 3:
                    print ("This is good.", good_angles)
        print ("---------END IMAGE ----------")
        cv2.imshow('pepe', image)
        # Pause on frames that produced clusters: space = next, ESC = quit.
        if len(clusters) >= 1:
            while(1):
                k = cv2.waitKey(33)
                if k == 32:
                    break
                if k == 27:
                    exit()
        else:
            cv2.waitKey(1)
        count = count + 1
    print ("")
    print ("Total Images Analyzed:", len(images))
    print ("Total Images With Points:", images_with_points)
if __name__ == "__main__":
    # CLI: <proc date dir, e.g. 2019_01_02> <camera number>.  Guarded so
    # importing this module no longer parses sys.argv or starts processing.
    sdate = sys.argv[1]
    cam_num = sys.argv[2]
    diff_stills(sdate, cam_num)
| gpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/patheffects.py | 10 | 14296 | """
Defines classes for path effects. The path effects are supported in
:class:`~matplotlib.text.Text`, :class:`~matplotlib.lines.Line2D`
and :class:`~matplotlib.patches.Patch`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.backend_bases import RendererBase
from matplotlib import (
colors as mcolors, patches as mpatches, transforms as mtransforms)
class AbstractPathEffect(object):
    """
    A base class for path effects.
    Subclasses should override the ``draw_path`` method to add effect
    functionality.
    """
    def __init__(self, offset=(0., 0.)):
        """
        Parameters
        ----------
        offset : pair of floats
            The offset to apply to the path, measured in points.
        """
        self._offset = offset
        # Reusable affine; re-populated on every draw by _offset_transform.
        self._offset_trans = mtransforms.Affine2D()
    def _offset_transform(self, renderer, transform):
        """Apply the offset to the given transform."""
        # The offset is stored in points; convert to display pixels here
        # because only the renderer knows the figure DPI.
        offset_x = renderer.points_to_pixels(self._offset[0])
        offset_y = renderer.points_to_pixels(self._offset[1])
        return transform + self._offset_trans.clear().translate(offset_x,
                                                                offset_y)
    def _update_gc(self, gc, new_gc_dict):
        """
        Update the given GraphicsContext with the given dict of properties.
        Each key ``k`` maps to the corresponding ``gc.set_<k>`` method;
        unknown keys raise AttributeError.  Returns the updated gc.
        """
        new_gc_dict = new_gc_dict.copy()
        # 'dashes' is special: it expands to set_dashes(**dashes) rather
        # than a single-argument setter.
        dashes = new_gc_dict.pop("dashes", None)
        if dashes:
            gc.set_dashes(**dashes)
        for k, v in six.iteritems(new_gc_dict):
            set_method = getattr(gc, 'set_' + k, None)
            if set_method is None or not six.callable(set_method):
                raise AttributeError('Unknown property {0}'.format(k))
            set_method(v)
        return gc
    def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
        """
        Derived should override this method. The arguments are the same
        as :meth:`matplotlib.backend_bases.RendererBase.draw_path`
        except the first argument is a renderer.
        """
        # Get the real renderer, not a PathEffectRenderer.
        if isinstance(renderer, PathEffectRenderer):
            renderer = renderer._renderer
        return renderer.draw_path(gc, tpath, affine, rgbFace)
class PathEffectRenderer(RendererBase):
    """
    Implements a Renderer which contains another renderer.
    This proxy then intercepts draw calls, calling the appropriate
    :class:`AbstractPathEffect` draw method.
    .. note::
        Not all methods have been overridden on this RendererBase subclass.
        It may be necessary to add further methods to extend the PathEffects
        capabilities further.
    """
    def __init__(self, path_effects, renderer):
        """
        Parameters
        ----------
        path_effects : iterable of :class:`AbstractPathEffect`
            The path effects which this renderer represents.
        renderer : :class:`matplotlib.backend_bases.RendererBase` instance
        """
        self._path_effects = path_effects
        self._renderer = renderer
    def new_gc(self):
        # Graphics contexts are created by the wrapped renderer.
        return self._renderer.new_gc()
    def copy_with_path_effect(self, path_effects):
        # Clone of this proxy wrapping the same renderer but a different
        # effect list (used for the per-effect recursion below).
        return self.__class__(path_effects, self._renderer)
    def draw_path(self, gc, tpath, affine, rgbFace=None):
        # Draw the path once per effect, in declaration order.
        for path_effect in self._path_effects:
            path_effect.draw_path(self._renderer, gc, tpath, affine,
                                  rgbFace)
    def draw_markers(self, gc, marker_path, marker_trans, path, *args,
                     **kwargs):
        # We do a little shimmy so that all markers are drawn for each path
        # effect in turn. Essentially, we induce recursion (depth 1) which is
        # terminated once we have just a single path effect to work with.
        if len(self._path_effects) == 1:
            # Call the base path effect function - this uses the unoptimised
            # approach of calling "draw_path" multiple times.
            return RendererBase.draw_markers(self, gc, marker_path,
                                             marker_trans, path, *args,
                                             **kwargs)
        for path_effect in self._path_effects:
            renderer = self.copy_with_path_effect([path_effect])
            # Recursively call this method, only next time we will only have
            # one path effect.
            renderer.draw_markers(gc, marker_path, marker_trans, path,
                                  *args, **kwargs)
    def draw_path_collection(self, gc, master_transform, paths, *args,
                             **kwargs):
        # We do a little shimmy so that all paths are drawn for each path
        # effect in turn. Essentially, we induce recursion (depth 1) which is
        # terminated once we have just a single path effect to work with.
        if len(self._path_effects) == 1:
            # Call the base path effect function - this uses the unoptimised
            # approach of calling "draw_path" multiple times.
            return RendererBase.draw_path_collection(self, gc,
                                                     master_transform, paths,
                                                     *args, **kwargs)
        for path_effect in self._path_effects:
            renderer = self.copy_with_path_effect([path_effect])
            # Recursively call this method, only next time we will only have
            # one path effect.
            renderer.draw_path_collection(gc, master_transform, paths,
                                          *args, **kwargs)
    def points_to_pixels(self, points):
        return self._renderer.points_to_pixels(points)
    def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
        # Implements the naive text drawing as is found in RendererBase.
        path, transform = self._get_text_path_transform(x, y, s, prop,
                                                        angle, ismath)
        color = gc.get_rgb()
        gc.set_linewidth(0.0)
        self.draw_path(gc, path, transform, rgbFace=color)
    def __getattribute__(self, name):
        # These attributes must come from the wrapped renderer so that text
        # layout and figure-geometry queries stay consistent with it.
        if name in ['_text2path', 'flipy', 'height', 'width']:
            return getattr(self._renderer, name)
        else:
            return object.__getattribute__(self, name)
class Normal(AbstractPathEffect):
    """
    The "identity" PathEffect.
    The Normal PathEffect's sole purpose is to draw the original artist with
    no special path effect.
    """
    # Inherits AbstractPathEffect.draw_path, which forwards straight to the
    # underlying renderer, i.e. draws the artist unmodified.
    pass
class Stroke(AbstractPathEffect):
    """A line based PathEffect which re-draws a stroke."""
    def __init__(self, offset=(0, 0), **kwargs):
        """
        The path will be stroked with its gc updated with the given
        keyword arguments, i.e., the keyword arguments should be valid
        gc parameter values.
        """
        super(Stroke, self).__init__(offset)
        # Stored as-is; validated against gc setters at draw time.
        self._gc = kwargs
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """
        draw the path with updated gc.
        """
        # Do not modify the input! Use copy instead.
        gc0 = renderer.new_gc()
        gc0.copy_properties(gc)
        # Apply the stroke's gc overrides (colour, linewidth, ...).
        gc0 = self._update_gc(gc0, self._gc)
        trans = self._offset_transform(renderer, affine)
        renderer.draw_path(gc0, tpath, trans, rgbFace)
        gc0.restore()
class withStroke(Stroke):
    """
    Adds a simple :class:`Stroke` and then draws the
    original Artist to avoid needing to call :class:`Normal`.
    """
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        # Stroked outline first, then the unmodified artist on top.
        Stroke.draw_path(self, renderer, gc, tpath, affine, rgbFace)
        renderer.draw_path(gc, tpath, affine, rgbFace)
class SimplePatchShadow(AbstractPathEffect):
    """A simple shadow via a filled patch."""
    def __init__(self, offset=(2, -2),
                 shadow_rgbFace=None, alpha=None,
                 rho=0.3, **kwargs):
        """
        Parameters
        ----------
        offset : pair of floats
            The offset of the shadow in points.
        shadow_rgbFace : color
            The shadow color.
        alpha : float
            The alpha transparency of the created shadow patch.
            Default is 0.3.
            http://matplotlib.1069221.n5.nabble.com/path-effects-question-td27630.html
        rho : float
            A scale factor to apply to the rgbFace color if `shadow_rgbFace`
            is not specified. Default is 0.3.
        **kwargs
            Extra keywords are stored and passed through to
            :meth:`AbstractPathEffect._update_gc`.
        """
        super(SimplePatchShadow, self).__init__(offset)
        # None means "derive the shadow colour from the artist's face
        # colour at draw time" (scaled by rho in draw_path below).
        if shadow_rgbFace is None:
            self._shadow_rgbFace = shadow_rgbFace
        else:
            self._shadow_rgbFace = mcolors.to_rgba(shadow_rgbFace)
        if alpha is None:
            alpha = 0.3
        self._alpha = alpha
        self._rho = rho
        #: The dictionary of keywords to update the graphics collection with.
        self._gc = kwargs
        #: The offset transform object. The offset isn't calculated yet
        #: as we don't know how big the figure will be in pixels.
        self._offset_tran = mtransforms.Affine2D()
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """
        Overrides the standard draw_path to add the shadow offset and
        necessary color changes for the shadow.
        """
        # IMPORTANT: Do not modify the input - we copy everything instead.
        affine0 = self._offset_transform(renderer, affine)
        gc0 = renderer.new_gc()
        gc0.copy_properties(gc)
        if self._shadow_rgbFace is None:
            r,g,b = (rgbFace or (1., 1., 1.))[:3]
            # Scale the colors by a factor to improve the shadow effect.
            shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
        else:
            shadow_rgbFace = self._shadow_rgbFace
        # No outline: the shadow is a filled silhouette only.
        gc0.set_foreground("none")
        gc0.set_alpha(self._alpha)
        gc0.set_linewidth(0)
        gc0 = self._update_gc(gc0, self._gc)
        renderer.draw_path(gc0, tpath, affine0, shadow_rgbFace)
        gc0.restore()
class withSimplePatchShadow(SimplePatchShadow):
    """
    Adds a simple :class:`SimplePatchShadow` and then draws the
    original Artist to avoid needing to call :class:`Normal`.
    """
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        # Shadow first, then the unmodified artist on top.
        SimplePatchShadow.draw_path(self, renderer, gc, tpath, affine, rgbFace)
        renderer.draw_path(gc, tpath, affine, rgbFace)
class SimpleLineShadow(AbstractPathEffect):
    """A simple shadow via a line."""
    def __init__(self, offset=(2,-2),
                 shadow_color='k', alpha=0.3, rho=0.3, **kwargs):
        """
        Parameters
        ----------
        offset : pair of floats
            The offset to apply to the path, in points.
        shadow_color : color
            The shadow color. Default is black.
            A value of ``None`` takes the original artist's color
            with a scale factor of `rho`.
        alpha : float
            The alpha transparency of the created shadow patch.
            Default is 0.3.
        rho : float
            A scale factor to apply to the rgbFace color if `shadow_rgbFace`
            is ``None``. Default is 0.3.
        **kwargs
            Extra keywords are stored and passed through to
            :meth:`AbstractPathEffect._update_gc`.
        """
        super(SimpleLineShadow, self).__init__(offset)
        # None means "derive the shadow colour from the artist's line
        # colour at draw time" (scaled by rho in draw_path below).
        if shadow_color is None:
            self._shadow_color = shadow_color
        else:
            self._shadow_color = mcolors.to_rgba(shadow_color)
        self._alpha = alpha
        self._rho = rho
        #: The dictionary of keywords to update the graphics collection with.
        self._gc = kwargs
        #: The offset transform object. The offset isn't calculated yet
        #: as we don't know how big the figure will be in pixels.
        self._offset_tran = mtransforms.Affine2D()
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """
        Overrides the standard draw_path to add the shadow offset and
        necessary color changes for the shadow.
        """
        # IMPORTANT: Do not modify the input - we copy everything instead.
        affine0 = self._offset_transform(renderer, affine)
        gc0 = renderer.new_gc()
        gc0.copy_properties(gc)
        if self._shadow_color is None:
            r,g,b = (gc0.get_foreground() or (1., 1., 1.))[:3]
            # Scale the colors by a factor to improve the shadow effect.
            shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
        else:
            shadow_rgbFace = self._shadow_color
        # The shadow is drawn as a stroke (foreground colour) with no fill.
        fill_color = None
        gc0.set_foreground(shadow_rgbFace)
        gc0.set_alpha(self._alpha)
        gc0.set_linestyle("solid")
        gc0 = self._update_gc(gc0, self._gc)
        renderer.draw_path(gc0, tpath, affine0, fill_color)
        gc0.restore()
class PathPatchEffect(AbstractPathEffect):
    """
    Draws a :class:`~matplotlib.patches.PathPatch` instance whose Path
    comes from the original PathEffect artist.
    """
    def __init__(self, offset=(0, 0), **kwargs):
        """
        Parameters
        ----------
        offset : pair of floats
            The offset to apply to the path, in points.
        **kwargs :
            All keyword arguments are passed through to the
            :class:`~matplotlib.patches.PathPatch` constructor. The
            properties which cannot be overridden are "path", "clip_box"
            "transform" and "clip_path".
        """
        super(PathPatchEffect, self).__init__(offset=offset)
        # Placeholder patch; its path is swapped in on every draw.
        self.patch = mpatches.PathPatch([], **kwargs)
    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        affine = self._offset_transform(renderer, affine)
        # Re-point the stored patch at this artist's path and clipping
        # state, then let the patch render itself.
        self.patch._path = tpath
        self.patch.set_transform(affine)
        self.patch.set_clip_box(gc._cliprect)
        self.patch.set_clip_path(gc._clippath)
        self.patch.draw(renderer)
| bsd-2-clause |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/plotfile_demo.py | 1 | 1195 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cbook as cbook
# Demonstrations of plt.plotfile(): plotting columns straight from CSV
# files, selected either by zero-based index or by header name.
fname = cbook.get_sample_data('msft.csv', asfileobj=False)
fname2 = cbook.get_sample_data('data_x_x2_x3.csv', asfileobj=False)
# test 1; use ints
# For msft.csv: column 0 = date, 5 = volume, 6 = adj_close.
plt.plotfile(fname, (0, 5, 6))
# test 2; use names
plt.plotfile(fname, ('date', 'volume', 'adj_close'))
# test 3; use semilogy for volume
plt.plotfile(fname, ('date', 'volume', 'adj_close'),
             plotfuncs={'volume': 'semilogy'})
# test 4; use semilogy for volume
plt.plotfile(fname, (0, 5, 6), plotfuncs={5: 'semilogy'})
# test 5; single subplot
plt.plotfile(fname, ('date', 'open', 'high', 'low', 'close'), subplots=False)
# test 6; labeling, if no names in csv-file
plt.plotfile(fname2, cols=(0, 1, 2), delimiter=' ',
             names=['$x$', '$f(x)=x^2$', '$f(x)=x^3$'])
# test 7; more than one file per figure--illustrated here with a single file
plt.plotfile(fname2, cols=(0, 1), delimiter=' ')
plt.plotfile(fname2, cols=(0, 2), newfig=False,
             delimiter=' ')  # use current figure
plt.xlabel(r'$x$')
plt.ylabel(r'$f(x) = x^2, x^3$')
# test 8; use bar for volume
plt.plotfile(fname, (0, 5, 6), plotfuncs={5: 'bar'})
plt.show()
jungla/ICOM-fluidity-toolbox | Detectors/plot_FSLE_v.py | 1 | 2388 | #!~/python
# Compute and plot finite-size Lyapunov exponents (FSLE) on a vertical
# (x, z) slice from Fluidity particle-detector output, overlaid on density
# contours probed from a VTU snapshot.  Python 2 script (print statements,
# list-returning zip).
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')  # non-interactive backend; must be set before pyplot import
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import myfun
import numpy as np
import pyvtk
import vtktools
import copy
import os

# Experiment id and input files (particle detectors + one VTU time frame).
exp = 'r_3k_B_1F0_r'
filename = './ring_checkpoint.detectors'
filename2 = '/tamay2/mensa/fluidity/'+exp+'/ring_30.pvtu'

data = vtktools.vtu(filename2)
coords = data.GetLocations()
depths = sorted(list(set(coords[:,2])))

# Regular probe grid in the y=0 plane used to sample density from the VTU.
Xlist = np.arange(-100000,100000+10000,10000)# x co-ordinates of the desired array shape
Ylist = np.arange(0,1)*0.0
Zlist = np.arange(-10,-900,-10)# y co-ordinates of the desired array shape
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
pts = zip(X,Y,Z)  # Python 2: zip returns a list of (x, y, z) tuples
pts = vtktools.arr(pts)
R = data.ProbeData(pts, 'Density_CG')
rho = np.reshape(R,[len(Zlist),len(Ylist),len(Xlist)])

# Ensure the per-experiment output directory exists.
try: os.stat('./plot/'+exp)
except OSError: os.mkdir('./plot/'+exp)

print 'reading detectors'
det = fluidity_tools.stat_parser(filename)
keys = det.keys() # particles
print 'done.'

# tt: number of time steps, pt: number of particles; output every 1800 s.
tt = 1200
pt = 5896
step = 1
z = range(-10,-890,-10)
x = range(-100000,100000,3000)
y = 0.0

# par[d, :, t] holds the (x, y, z) position of particle d at step t.
par = np.zeros((pt,3,tt))
time = range(1800,1800*(tt+1),1800)

# read particles
for d in range(pt):
    temp = det['particles_'+myfun.digit(d+1,4)]['position']
    par[d,:,:] = temp[:,0:tt]

#fsle param
di = 10 # base separation distance [m]. Taken as the distance between the particles in the triplet.

# read T from archive
for r in np.linspace(1,3):
    #print 'plotting for dr:',r*di
    fsle = np.zeros(pt)*np.nan
    df = 11.0 #r*di # separation distance
    # NOTE(review): df is hard-coded to 11.0, so every iteration of the
    # r loop (50 values by linspace default) recomputes and overwrites the
    # same output file -- confirm whether df = r*di was intended.
    #
    # loop triplets in time
    #
    #
    for t in range(tt):
        for d in range(0,pt-len(x)):
            # loop particles
            if par[d,2,t] < 0.0 and par[d+len(x),2,t] < 0.0:
                # NOTE(review): separation uses only the z component
                # (par[:,2,:]); verify a full 3-D distance is not wanted.
                dr = np.linalg.norm(par[d,2,t]-par[d+len(x),2,t])
                # if dr > 15.0: print dr,d,t
                # FSLE: record the first time the pair separation exceeds
                # df; fsle[d] stays NaN for pairs that never separate.
                if (dr > df and np.isnan(fsle[d])):
                    fsle[d] = np.log(dr/di)/time[t]
    min_fsle = np.percentile(fsle,0.1)
    max_fsle = 0.0000005 #np.percentile(fsle,99)
    # Reshape the flat particle array back onto the (z, x) release grid.
    fsler = np.reshape(fsle,(len(z),len(x)))
    #
    plt.figure()
    v = np.linspace(1e-7,1e-6, 25, endpoint=True)
    plt.contourf(x,z,fsler,v,extend='both',cmap='jet')
    plt.colorbar(format='%.3e')
    plt.contour(Xlist,Zlist,np.squeeze(rho),20,colors=[0.5,0.5,0.5])
    plt.savefig('./plot/'+exp+'/fsle_'+exp+'_'+str(df)+'.eps',bbox_inches='tight')
    plt.close()
| gpl-2.0 |
abhisg/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This alows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    """Return a noisy, increasing-on-average sequence of length ``size``.

    The underlying signal is ``50 * log(1 + i)`` for ``i = 0..size-1``,
    perturbed by uniform integer noise drawn from ``[-50, 50)``.

    Bug fix: the body previously referenced the global ``n`` (leaked from
    the ``__main__`` loop) instead of the ``size`` parameter, so the
    function raised NameError whenever called outside this script.
    """
    noise = np.random.randint(-50, 50, size=size)
    return noise + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
    """Sample a boolean response vector from a logistic model.

    A sorted Gaussian design of length ``size`` is drawn, squashed
    through the logistic sigmoid to get success probabilities, and a
    Bernoulli draw is made per element.  RNG consumption (one ``normal``
    call, then one ``random`` call) matches the original implementation.
    """
    sorted_gaussian = np.sort(np.random.normal(size=size))
    success_prob = 1.0 / (1.0 + np.exp(-sorted_gaussian))
    uniform_draws = np.random.random(size=size)
    return uniform_draws < success_prob
# Maps the --dataset CLI choice to the function that synthesizes that data.
DATASET_GENERATORS = {
    'perturbed_logarithm': generate_perturbed_logarithm_dataset,
    'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Run a single iteration of isotonic regression on the input data,
    and report the total time taken (in seconds).

    Parameters
    ----------
    Y : array-like
        Response values to fit with isotonic regression.

    Returns
    -------
    float
        Wall-clock duration of the fit, in seconds.
    """
    # Collect garbage up front so allocator churn from earlier runs does
    # not pollute the timing.
    gc.collect()

    tstart = datetime.now()
    isotonic_regression(Y)
    delta = datetime.now() - tstart
    # timedelta.total_seconds() (stdlib, Python >= 2.7) replaces the
    # sklearn.utils.bench.total_seconds compatibility helper.
    return delta.total_seconds()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                        "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)
    args = parser.parse_args()

    timings = []
    # Benchmark problem sizes 10**min .. 10**(max-1).
    # NOTE(review): range() makes the upper bound exclusive, so
    # --log_max_problem_size itself is never benchmarked -- confirm intended.
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        # Average over --iterations runs on the same synthetic data.
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)
        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    if args.show_plot:
        # Log-log plot of mean runtime vs problem size reveals the
        # algorithm's scaling exponent as a slope.
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/doc/sphinxext/numpydoc/tests/test_docscrape.py | 39 | 18326 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
# Python 2/3 compatibility shim: sixu(s) returns a unicode string for a
# literal containing '\uXXXX' escapes.  On Python 3, str is already
# unicode; on Python 2 the escapes must be decoded explicitly.
if sys.version_info[0] >= 3:
    sixu = lambda s: s
else:
    sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
    """The parsed Signature keeps the full call line of the fixture."""
    signature = doc['Signature']
    assert signature.startswith('numpy.multivariate_normal(')
    assert signature.endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
    """All three fixture parameters parse with name, type and body."""
    params = doc['Parameters']
    assert_equal(len(params), 3)
    names = [name for name, _, _ in params]
    assert_equal(names, ['mean', 'cov', 'shape'])
    cov_name, cov_type, cov_desc = params[1]
    assert_equal(cov_type, '(N, N) ndarray')
    assert cov_desc[0].startswith('Covariance matrix')
    # The indented ``.. math::`` body of `mean` survives verbatim.
    assert params[0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
    """Both the named and the anonymous return value are captured."""
    returns = doc['Returns']
    assert_equal(len(returns), 2)

    name, rtype, body = returns[0]
    assert_equal(name, 'out')
    assert_equal(rtype, 'ndarray')
    assert body[0].startswith('The drawn samples')
    assert body[-1].endswith('distribution.')

    # The second entry has no ``name : type`` split -- the whole header
    # lands in the name slot and the type is empty.
    name, rtype, body = returns[1]
    assert_equal(name, 'list of str')
    assert_equal(rtype, '')
    assert body[0].startswith('This is not a real')
    assert body[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
    """Compare two blobs of text line by line, ignoring blank lines.

    Each input is dedented, split on newlines, stripped of trailing
    whitespace, and blank lines are dropped.  An AssertionError is
    raised for the first pair of corresponding lines that differ.
    """
    def significant_lines(text):
        # Dedent, strip trailing whitespace, drop blank lines.
        return [line.rstrip()
                for line in textwrap.dedent(text).split('\n')
                if line.strip()]

    lines_a = significant_lines(a)
    lines_b = significant_lines(b)
    for n, line in enumerate(lines_a):
        if line != lines_b[n]:
            raise AssertionError("Lines %s of a and b differ: "
                                 "\n>>> %s\n<<< %s\n" %
                                 (n, line, lines_b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
    """The rendered signature escapes ``*`` so Sphinx does not read it
    as emphasis markup."""
    signature = str(doc3).split('\n')[0]
    # Raw string: '\*' is an invalid escape sequence in a plain literal
    # (DeprecationWarning since Python 3.6, scheduled to become an
    # error).  The string value is unchanged.
    assert_equal(signature, r'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
    """The Raises section of doc5 parses into exactly one entry."""
    entries = doc5['Raises']
    assert_equal(len(entries), 1)
    exc_name, _, exc_desc = entries[0]
    assert_equal(exc_name, 'LinAlgException')
    assert_equal(exc_desc, ['If array is singular.'])
def test_warns():
    """The Warns section of doc5 parses into exactly one entry."""
    entries = doc5['Warns']
    assert_equal(len(entries), 1)
    warn_name, _, warn_desc = entries[0]
    assert_equal(warn_name, 'SomeWarning')
    assert_equal(warn_desc, ['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| mit |
andrewnc/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data"""
    n_samples = len(y)
    classes = np.unique(y)
    n_classes = classes.shape[0]

    # Fit and predict on the same data: these toy problems are expected
    # to be fit exactly, so predictions must reproduce the labels.
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, classes)

    assert_equal(predicted.shape, (n_samples,))
    assert_array_equal(predicted, y)

    # Probabilities: one row per sample, one column per class, rows
    # summing to 1 and peaking at the true class.
    probabilities = clf.predict_proba(X)
    assert_equal(probabilities.shape, (n_samples, n_classes))
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
    """coef_ and intercept_ are plain writable arrays: zeroing them in
    place must zero the decision function."""
    model = LogisticRegression(random_state=0)
    model.fit(X, Y1)
    for learned in (model.coef_, model.intercept_):
        learned[:] = 0
    assert_array_almost_equal(model.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    # Injecting a single NaN into the training data must make fit() raise
    # ValueError (enforced by the @raises decorator) instead of hanging.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    # With a single candidate C, the CV estimator must match a plain
    # LogisticRegression fit and expose correctly shaped CV attributes.
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)

    # coefs_paths_ is keyed by class; shape is
    # (n_classes, n_folds, n_Cs, n_features) with the default 3-fold CV.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    """LogisticRegressionCV must give the same fit on dense and CSR input."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    # Sparsify the design so the CSR representation is non-trivial.
    X[X < 1.0] = 0.0
    X_csr = sp.csr_matrix(X)

    dense_clf = LogisticRegressionCV(fit_intercept=True)
    dense_clf.fit(X, y)
    sparse_clf = LogisticRegressionCV(fit_intercept=True)
    sparse_clf.fit(X_csr, y)

    assert_array_almost_equal(sparse_clf.coef_, dense_clf.coef_)
    assert_array_almost_equal(sparse_clf.intercept_, dense_clf.intercept_)
    assert_equal(sparse_clf.C_, dense_clf.C_)
def test_intercept_logistic_helper():
    # Fitting an intercept must be equivalent to appending an all-ones
    # feature column, except that the helpers do not penalize the intercept.
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    # Hessian-vector products should agree on the feature coordinates and
    # differ only by the penalty term on the last (intercept) coordinate.
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # Use pre-defined fold as folds generated for different y
    # (old StratifiedKFold API: labels passed to the constructor).
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)

    # Relabeling class 0 as 1 must not change the per-class results
    # for class 2 (OvR treats each class independently).
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
        )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    """All binary solvers must converge to (nearly) the same coefficients."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)

    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False),
        LogisticRegression(solver='lbfgs', fit_intercept=False),
        LogisticRegression(fit_intercept=False),  # liblinear default
        LogisticRegression(solver='sag', fit_intercept=False,
                           random_state=42),
    ]
    for model in models:
        model.fit(X, y)

    # Every pair of solvers must agree to 3 decimals.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    """All solvers must agree on a small 3-class problem (OvR scheme)."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-6
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol),
        LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol),
        LogisticRegression(fit_intercept=False, tol=tol),  # liblinear
        LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                           max_iter=1000, random_state=42),
    ]
    for model in models:
        model.fit(X, y)

    # Every pair of solvers must agree to 4 decimals.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    # Check class_weight handling in LogisticRegressionCV across solvers.
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)

    # Test the liblinear fails when class_weight of type dict is
    # provided, when it is multiclass. However it can handle
    # binary problems.
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raises(ValueError, clf_lib.fit, X, y)
    # Merge classes 1 and 2 to get a binary problem, which must work.
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])

    # Test for class_weight=balanced
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='balanced')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='balanced')
    clf_lib.fit(X, y)
    clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
                                   class_weight='balanced', max_iter=2000)
    clf_sag.fit(X, y)
    # All three solvers must agree on the balanced-weight solution.
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    # Check sample_weight semantics for LogisticRegression and
    # LogisticRegressionCV across solvers.
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Test that liblinear fails when sample weights are provided
        clf_lib = LR(solver='liblinear')
        assert_raises(ValueError, clf_lib.fit, X, y,
                      sample_weight=np.ones(y.shape[0]))

        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_none.fit(X, y)
        clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
        assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)

        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
        clf_sw_n.fit(X, y, sample_weight=y + 1)
        clf_sw_sag = LR(solver='sag', fit_intercept=False,
                        max_iter=2000, tol=1e-7)
        clf_sw_sag.fit(X, y, sample_weight=y + 1)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)

        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
                       class_weight={0: 1, 1: 2})
        clf_cw_12.fit(X, y)
        sample_weight = np.ones(y.shape[0])
        sample_weight[y == 1] = 2
        clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_12.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    """A too-small max_iter must trigger a ConvergenceWarning."""
    X, y = make_classification(n_samples=20, n_features=20)
    model = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, model.fit, X, y)
    # The solver must have stopped exactly at the iteration cap.
    assert_equal(model.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression

    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)

    # coef_ must have one row per class, with and without intercept.
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))

    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))

    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))

    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))

    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
    # Validate the multinomial Hessian-vector product against a
    # finite-difference estimate of the gradient's slope.
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot targets: each sample is assigned its argmax class under w.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Samples whose decision function is exactly zero get the negative class.

    liblinear itself predicts the positive class at zero; scikit-learn
    deliberately differs.  See issue
    https://github.com/scikit-learn/scikit-learn/issues/3600 and PR
    https://github.com/scikit-learn/scikit-learn/pull/3623.
    """
    X, y = make_classification(n_samples=5, n_features=5)
    model = LogisticRegression(fit_intercept=False)
    model.fit(X, y)

    # With no intercept, all-zero samples make the decision function zero.
    X = np.zeros((5, 5))
    assert_array_equal(model.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """LogisticRegressionCV(solver='liblinear') must accept CSR input."""
    X, y = make_classification(n_samples=10, n_features=5)
    X_csr = sparse.csr_matrix(X)
    model = LogisticRegressionCV(solver='liblinear')
    model.fit(X_csr, y)  # smoke test: must not raise
def test_logreg_intercept_scaling():
    """intercept_scaling <= 0 must raise ValueError with the exact message."""
    for scaling in (-1, 0):
        clf = LogisticRegression(intercept_scaling=scaling)
        expected = ('Intercept scaling is %r but needs to be greater than 0.'
                    ' To disable fitting an intercept,'
                    ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, expected, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """intercept_scaling has no effect when no intercept is fitted."""
    lr = LogisticRegression(fit_intercept=False)
    lr.fit(X, Y1)
    # The intercept must stay exactly at its 0.0 default.
    assert_equal(lr.intercept_, 0.)
def test_logreg_cv_penalty():
    """The CV estimator's final refit must honour the requested penalty."""
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    cv_model.fit(X, y)
    plain_model = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    plain_model.fit(X, y)
    # l1 yields sparse coefficients; both fits must agree on the sparsity.
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain_model.coef_))
def test_logreg_predict_proba_multinomial():
    # On a multiclass problem, the multinomial (softmax) probabilities
    # should achieve a strictly lower log-loss than OvR probabilities.
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function
    # (_predict_proba_lr applies per-class sigmoids to the same model).
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iteration is reached
    # (tol is set impossibly tight so every solver hits the cap).
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for max_iter in range(1, 5):
        for solver in solvers:
            lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                    random_state=0, solver=solver)
            lr.fit(X, y_bin)
            # n_iter_ reports per-class iteration counts; the single
            # binary problem must have stopped exactly at max_iter.
            assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    # Test that self.n_iter_ has the correct format.
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case
        # liblinear reports a single iteration count; the others report
        # one per class.
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            # These solvers do not support the multinomial loss; skip to
            # the next solver.  BUGFIX: this was ``break``, which exited
            # the loop at 'liblinear' and silently skipped the OvR checks
            # for 'sag' and 'lbfgs' entirely.
            continue

        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target

    solvers = ['newton-cg', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    if solver == 'sag' and multi_class == 'multinomial':
                        # sag does not support the multinomial loss.
                        # ROBUSTNESS FIX: was ``break`` — only equivalent
                        # because 'multinomial' happens to be the last
                        # entry; ``continue`` expresses the intent and
                        # survives reordering of the list.
                        continue
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_

                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        # One warm-started iteration barely moves the coefs.
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        # A cold 1-iteration refit lands far from convergence.
                        assert_greater(cum_diff, 2.0, msg)
# (extraction artifact: end of test_logistic.py; source metadata read
#  "bsd-3-clause" license, repo "glouppe/scikit-learn", path
#  "sklearn/linear_model/tests/test_ridge.py" — the lines below begin
#  that second file.)
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
# Shared fixtures: a shuffled 200-sample subset of the diabetes regression
# data, and the iris classification data with a CSR copy of the design.
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
# Identity / CSR-converting filters let each helper run on both dense and
# sparse versions of the same data (see test_dense_sparse).
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
    # Ridge regression convergence test using score
    # TODO: for this test to be robust, we should use a dataset instead
    # of np.random.
    rng = np.random.RandomState(0)
    alpha = 1.0

    # Exercise every solver in both the n > p and the p > n regimes.
    for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
        # With more samples than features
        n_samples, n_features = 6, 5
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)

        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_equal(ridge.coef_.shape, (X.shape[1], ))
        assert_greater(ridge.score(X, y), 0.47)

        if solver in ("cholesky", "sag"):
            # Currently the only solvers to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.47)

        # With more features than samples
        n_samples, n_features = 5, 10
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_greater(ridge.score(X, y), .9)

        if solver in ("cholesky", "sag"):
            # Currently the only solvers to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
    """Primal coefficients must match those recovered from the dual."""
    y = y_diabetes.reshape(-1, 1)
    primal_coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
    # Dual problem is solved in the Gram (kernel) space: K = X X^T.
    gram = np.dot(X_diabetes, X_diabetes.T)
    dual_coef = _solve_cholesky_kernel(gram, y, alpha=[1e-2])
    # Map dual coefficients back to the primal: w = X^T alpha.
    recovered_coef = np.dot(X_diabetes.T, dual_coef).T
    assert_array_almost_equal(primal_coef, recovered_coef)
def test_ridge_singular():
    """Unregularized Ridge must still fit a rank-deficient design well."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 6, 6
    # Duplicate each half so the design matrix is singular by construction.
    y_half = rng.randn(n_samples // 2)
    y = np.concatenate((y_half, y_half))
    X_half = rng.randn(n_samples // 2, n_features)
    X = np.concatenate((X_half, X_half), axis=0)

    model = Ridge(alpha=0)
    model.fit(X, y)
    assert_greater(model.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
    # For the squared loss, weighting sample i by w_i is equivalent to
    # rescaling row i of X and y by sqrt(w_i); ridge_regression must agree.
    rng = np.random.RandomState(0)

    for solver in ("cholesky", ):
        for n_samples, n_features in ((6, 5), (5, 10)):
            for alpha in (1.0, 1e-2):
                y = rng.randn(n_samples)
                X = rng.randn(n_samples, n_features)
                sample_weight = 1.0 + rng.rand(n_samples)

                coefs = ridge_regression(X, y,
                                         alpha=alpha,
                                         sample_weight=sample_weight,
                                         solver=solver)

                # Sample weight can be implemented via a simple rescaling
                # for the square loss.
                coefs2 = ridge_regression(
                    X * np.sqrt(sample_weight)[:, np.newaxis],
                    y * np.sqrt(sample_weight),
                    alpha=alpha, solver=solver)
                assert_array_almost_equal(coefs, coefs2)
def test_ridge_sample_weights():
    # Compare Ridge's weighted fit against the closed-form weighted
    # regularized least-squares solution.
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)
    # BUGFIX: ``product`` returns a one-shot iterator.  The original code
    # reused it across both (n_samples, n_features) configurations, so the
    # iterator was exhausted after the first pass and the whole (5, 10)
    # configuration was silently never tested.  Materialize it once.
    param_grid = list(product((1.0, 1e-2), (True, False),
                              ('svd', 'cholesky', 'lsqr', 'sparse_cg')))

    for n_samples, n_features in ((6, 5), (5, 10)):

        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        for (alpha, intercept, solver) in param_grid:

            # Ridge with explicit sample_weight
            est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)
            est.fit(X, y, sample_weight=sample_weight)
            coefs = est.coef_
            inter = est.intercept_

            # Closed form of the weighted regularized least square
            # theta = (X^T W X + alpha I)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
                I = np.eye(n_features)
            else:
                # Model the intercept as an extra, unpenalized ones column.
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)
                I = np.eye(n_features + 1)
                I[0, 0] = 0

            cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
                                    X_aug.T.dot(W).dot(y))

            if intercept is False:
                assert_array_almost_equal(coefs, cf_coefs)
            else:
                assert_array_almost_equal(coefs, cf_coefs[1:])
                assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
    # Test shape of coef_ and intercept_
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    Y1 = y[:, np.newaxis]   # same target as a (n_samples, 1) column vector
    Y = np.c_[y, 1 + y]     # two targets

    ridge = Ridge()

    # 1-D target -> 1-D coef_, scalar intercept_.
    ridge.fit(X, y)
    assert_equal(ridge.coef_.shape, (n_features,))
    assert_equal(ridge.intercept_.shape, ())

    # Column-vector target -> 2-D coef_ with a single row.
    ridge.fit(X, Y1)
    assert_equal(ridge.coef_.shape, (1, n_features))
    assert_equal(ridge.intercept_.shape, (1, ))

    # Two targets -> one coef_ row / intercept entry per target.
    ridge.fit(X, Y)
    assert_equal(ridge.coef_.shape, (2, n_features))
    assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
    """Multi-target fits keep one intercept per target (GH issue #708)."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    # Second target is the first shifted by one, so its intercept must be
    # shifted by one as well.
    Y = np.c_[y, 1. + y]

    model = Ridge()
    model.fit(X, y)
    single_target_intercept = model.intercept_

    model.fit(X, Y)
    assert_almost_equal(model.intercept_[0], single_target_intercept)
    assert_almost_equal(model.intercept_[1], single_target_intercept + 1.)
def test_toy_ridge_object():
    """Unregularized Ridge interpolates a toy 1-D problem exactly."""
    # TODO: test also n_samples > n_features
    X = np.array([[1], [2]])
    Y = np.array([1, 2])
    model = Ridge(alpha=0.0)
    model.fit(X, Y)
    X_test = [[1], [2], [3], [4]]
    assert_almost_equal(model.predict(X_test), [1., 2, 3, 4])

    # 1-D target: flat coef_, scalar intercept_.
    assert_equal(len(model.coef_.shape), 1)
    assert_equal(type(model.intercept_), np.float64)

    # Stacking the target twice makes the problem multi-output: coef_
    # becomes 2-D and intercept_ an array.
    Y = np.vstack((Y, Y)).T
    model.fit(X, Y)
    X_test = [[1], [2], [3], [4]]
    assert_equal(len(model.coef_.shape), 2)
    assert_equal(type(model.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
    # On alpha=0., Ridge and OLS yield the same solution.
    rng = np.random.RandomState(0)
    # we need more samples than features
    n_samples, n_features = 5, 4
    y = rng.randn(n_samples)
    X = rng.randn(n_samples, n_features)

    ridge = Ridge(alpha=0., fit_intercept=False)
    ols = LinearRegression(fit_intercept=False)

    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
    # CLEANUP: the original repeated the identical fit/assert block verbatim
    # a second time on the same data, which added no coverage; removed.
def test_ridge_individual_penalties():
    # Tests the ridge object using individual penalties
    # (one alpha per target must equal per-target single-alpha fits).
    rng = np.random.RandomState(42)

    n_samples, n_features, n_targets = 20, 10, 5
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples, n_targets)

    penalties = np.arange(n_targets)

    # Reference: fit each target separately with its own alpha.
    coef_cholesky = np.array([
        Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
        for alpha, target in zip(penalties, y.T)])

    # All solvers must reproduce the reference when given the alpha vector.
    coefs_indiv_pen = [
        Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
        for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
    for coef_indiv_pen in coefs_indiv_pen:
        assert_array_almost_equal(coef_cholesky, coef_indiv_pen)

    # Test error is raised when number of targets and penalties do not match.
    ridge = Ridge(alpha=penalties[:-1])
    assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # Validate the efficient leave-one-out GCV implementation (_RidgeGCV)
    # against brute-force LOO and its SVD variant; then check that alpha
    # selection is stable across scorer variants and sample weights.
    # test that can work with both dense or sparse matrices
    n_samples = X_diabetes.shape[0]

    ret = []

    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(alpha=1.0, fit_intercept=False)

    # generalized cross-validation (efficient leave-one-out)
    decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
    values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)

    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)

    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)

    # generalized cross-validation (efficient leave-one-out,
    # SVD variation)
    decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
    errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
    values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)

    # check that efficient and SVD efficient LOO give same results
    assert_almost_equal(errors, errors3)
    assert_almost_equal(values, values3)

    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)

    # check that we get same best alpha with custom loss_func
    f = ignore_warnings
    scoring = make_scorer(mean_squared_error, greater_is_better=False)
    ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.alpha_, alpha_)

    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    scoring = make_scorer(func)
    ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv3.alpha_, alpha_)

    # check that we get same best alpha with a scorer
    scorer = get_scorer('mean_squared_error')
    ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
    ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv4.alpha_, alpha_)

    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.alpha_, alpha_)

    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T

    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))

    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)

    return ret
def _test_ridge_cv(filter_):
    """Smoke-test RidgeCV with the default LOO-GCV and an explicit KFold."""
    ridge_cv = RidgeCV()
    for cv in (None, KFold(5)):
        if cv is not None:
            ridge_cv.set_params(cv=cv)
        ridge_cv.fit(filter_(X_diabetes), y_diabetes)
        ridge_cv.predict(filter_(X_diabetes))
        # Single target: flat coef_ and a scalar intercept_.
        assert_equal(len(ridge_cv.coef_.shape), 1)
        assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
    """Return the rounded training R^2 of a no-intercept Ridge fit."""
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), y_diabetes)
    score = model.score(filter_(X_diabetes), y_diabetes)
    return np.round(score, 5)
def _test_multi_ridge_diabetes(filter_):
    """A duplicated-target fit must match the single-target predictions."""
    # simulate several responses by stacking the target twice
    Y = np.vstack((y_diabetes, y_diabetes)).T
    n_features = X_diabetes.shape[1]

    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), Y)
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(filter_(X_diabetes))

    model.fit(filter_(X_diabetes), y_diabetes)
    y_pred = model.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
    """RidgeClassifier and RidgeClassifierCV must fit iris reasonably well."""
    n_classes = np.unique(y_iris).shape[0]
    n_features = X_iris.shape[1]
    for clf in (RidgeClassifier(), RidgeClassifierCV()):
        clf.fit(filter_(X_iris), y_iris)
        assert_equal(clf.coef_.shape, (n_classes, n_features))
        accuracy = np.mean(clf.predict(filter_(X_iris)) == y_iris)
        assert_greater(accuracy, .79)

    # Same check with an explicit KFold instead of the default LOO-GCV.
    kfold_clf = RidgeClassifierCV(cv=KFold(5))
    kfold_clf.fit(filter_(X_iris), y_iris)
    accuracy = np.mean(kfold_clf.predict(filter_(X_iris)) == y_iris)
    assert_true(accuracy >= 0.8)
def _test_tolerance(filter_):
    """A tighter solver tolerance must never yield a worse training score."""
    scores = []
    for tol in (1e-5, 1e-3):
        model = Ridge(tol=tol, fit_intercept=False)
        model.fit(filter_(X_diabetes), y_diabetes)
        scores.append(model.score(filter_(X_diabetes), y_diabetes))
    tight_score, loose_score = scores
    assert_true(tight_score >= loose_score)
# ignore warning that solvers are changed to SAG for
# temporary fix
@ignore_warnings
def test_dense_sparse():
    """Every helper must produce the same result on dense and sparse input."""
    helpers = (_test_ridge_loo,
               _test_ridge_cv,
               _test_ridge_diabetes,
               _test_multi_ridge_diabetes,
               _test_ridge_classifiers,
               _test_tolerance)
    for helper in helpers:
        dense_result = helper(DENSE_FILTER)
        sparse_result = helper(SPARSE_FILTER)
        # Helpers that return None assert internally; compare the rest.
        if dense_result is not None and sparse_result is not None:
            assert_array_almost_equal(dense_result, sparse_result, decimal=3)
def test_ridge_cv_sparse_svd():
    # gcv_mode="svd" is not implemented for sparse input and must raise.
    X = sp.csr_matrix(X_diabetes)
    ridge = RidgeCV(gcv_mode="svd")
    # BUGFIX: pass y explicitly.  Calling ``fit(X)`` alone also raises
    # TypeError (missing required positional argument), so the original
    # assertion passed vacuously without exercising the sparse-SVD check.
    assert_raises(TypeError, ridge.fit, X, y_diabetes)
def test_ridge_sparse_svd():
    """solver='svd' must reject sparse input with a TypeError."""
    X = sp.csc_matrix(rng.rand(100, 10))
    y = rng.rand(100)
    model = Ridge(solver='svd', fit_intercept=False)
    assert_raises(TypeError, model.fit, X, y)
def test_class_weights():
    # Test class weights.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]

    clf = RidgeClassifier(class_weight=None)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

    # we give a small weights to class 1
    clf = RidgeClassifier(class_weight={1: 0.001})
    clf.fit(X, y)

    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))

    # check if class_weight = 'balanced' can handle negative labels.
    clf = RidgeClassifier(class_weight='balanced')
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

    # class_weight = 'balanced', and class_weight = None should return
    # same values when y has equal number of all labels
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
    y = [1, 1, -1, -1]
    clf = RidgeClassifier(class_weight=None)
    clf.fit(X, y)
    clfa = RidgeClassifier(class_weight='balanced')
    clfa.fit(X, y)
    assert_equal(len(clfa.classes_), 2)
    assert_array_almost_equal(clf.coef_, clfa.coef_)
    assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
    """Check class_weights resemble sample_weights behavior."""
    for estimator_cls in (RidgeClassifier, RidgeClassifierCV):
        # Iris is balanced, so 'balanced' weighting must be a no-op.
        plain = estimator_cls()
        plain.fit(iris.data, iris.target)
        balanced = estimator_cls(class_weight='balanced')
        balanced.fit(iris.data, iris.target)
        assert_almost_equal(plain.coef_, balanced.coef_)

        # Inflating class 1 through sample weights should match the
        # equivalent user-defined class weights.
        sample_weight = np.ones(iris.target.shape)
        sample_weight[iris.target == 1] *= 100
        class_weight = {0: 1., 1: 100., 2: 1.}
        weighted_samples = estimator_cls()
        weighted_samples.fit(iris.data, iris.target, sample_weight)
        weighted_classes = estimator_cls(class_weight=class_weight)
        weighted_classes.fit(iris.data, iris.target)
        assert_almost_equal(weighted_samples.coef_, weighted_classes.coef_)

        # sample_weight and class_weight must combine multiplicatively.
        squared = estimator_cls()
        squared.fit(iris.data, iris.target, sample_weight ** 2)
        combined = estimator_cls(class_weight=class_weight)
        combined.fit(iris.data, iris.target, sample_weight)
        assert_almost_equal(squared.coef_, combined.coef_)
def test_class_weights_cv():
    """Class weights must also work for the cross-validated classifier."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]

    baseline = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
    baseline.fit(X, y)

    # Down-weighting class 1 makes this probe point predict negative.
    weighted = RidgeClassifierCV(class_weight={1: 0.001},
                                 alphas=[.01, .1, 1, 10])
    weighted.fit(X, y)
    assert_array_equal(weighted.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
    """RidgeCV(store_cv_values=True) must expose per-sample CV values.

    ``cv_values_`` has shape (n_samples, n_alphas) for 1d targets and
    (n_samples, n_targets, n_alphas) for 2d targets.
    """
    # Fixed: the original had an accidental double assignment
    # ``rng = rng = np.random.RandomState(42)``.
    rng = np.random.RandomState(42)

    n_samples = 8
    n_features = 5
    x = rng.randn(n_samples, n_features)
    alphas = [1e-1, 1e0, 1e1]
    n_alphas = len(alphas)

    r = RidgeCV(alphas=alphas, store_cv_values=True)

    # with len(y.shape) == 1
    y = rng.randn(n_samples)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_alphas))

    # with len(y.shape) == 2
    n_responses = 3
    y = rng.randn(n_samples, n_responses)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
    """RidgeCV with sample weights must match an equivalent grid search."""
    rng = np.random.RandomState(0)
    alphas = (0.1, 1.0, 10.0)
    # There are different algorithms for n_samples > n_features and the
    # opposite, so exercise both regimes.
    for n_samples, n_features in ((6, 5), (5, 10)):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        cv = KFold(5)
        ridgecv = RidgeCV(alphas=alphas, cv=cv)
        ridgecv.fit(X, y, sample_weight=sample_weight)

        # The same search expressed through GridSearchCV directly.
        grid = GridSearchCV(Ridge(), {'alpha': alphas},
                            fit_params={'sample_weight': sample_weight},
                            cv=cv)
        grid.fit(X, y)

        assert_equal(ridgecv.alpha_, grid.best_estimator_.alpha)
        assert_array_almost_equal(ridgecv.coef_, grid.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1D; 2D shapes must raise."""
    rng = np.random.RandomState(42)
    for n_samples, n_features in zip([2, 3], [3, 2]):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        weights_1d = rng.randn(n_samples) ** 2 + 1
        ridge = Ridge(alpha=1)

        # Valid shapes: 1D array or plain scalars -- these must not raise.
        ridge.fit(X, y, weights_1d)
        ridge.fit(X, y, 1.)
        ridge.fit(X, y, 2.)

        # Column and row vectors are 2D and must be rejected.
        for bad_weights in (weights_1d[:, np.newaxis],
                            weights_1d[np.newaxis, :]):
            # Bind bad_weights as a default so each closure keeps its value.
            assert_raise_message(ValueError,
                                 "Sample weights must be 1D array or scalar",
                                 lambda w=bad_weights: ridge.fit(X, y, w))
def test_sparse_design_with_sample_weights():
    """Weighted fits on sparse designs must match the dense equivalent."""
    rng = np.random.RandomState(42)
    to_sparse = [sp.coo_matrix,
                 sp.csr_matrix,
                 sp.csc_matrix,
                 sp.lil_matrix,
                 sp.dok_matrix]

    sparse_ridge = Ridge(alpha=1., fit_intercept=False)
    dense_ridge = Ridge(alpha=1., fit_intercept=False)

    for n_samples, n_features in zip([2, 3], [3, 2]):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights = rng.randn(n_samples) ** 2 + 1
        # Every sparse format must give the same coefficients as dense.
        for converter in to_sparse:
            sparse_ridge.fit(converter(X), y, sample_weight=sample_weights)
            dense_ridge.fit(X, y, sample_weight=sample_weights)
            assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
                                      decimal=6)
def test_raises_value_error_if_solver_not_supported():
    """ridge_regression must raise ValueError for an unknown solver name."""
    wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"

    def fit_with_bogus_solver():
        ridge_regression(np.eye(3), np.ones(3), alpha=1., solver=wrong_solver)

    assert_raise_message(ValueError,
                         "Solver %s not understood" % wrong_solver,
                         fit_with_bogus_solver)
def test_sparse_cg_max_iter():
    """sparse_cg must yield a full coefficient vector even with max_iter=1."""
    model = Ridge(solver="sparse_cg", max_iter=1)
    model.fit(X_diabetes, y_diabetes)
    assert_equal(model.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
    """``n_iter_`` is reported per target for iterative solvers, else None."""
    n_targets = 2
    X, y = X_diabetes, y_diabetes
    Y = np.tile(y, (n_targets, 1)).T

    # Iterative solvers report the iteration count for each target.
    for max_iter in range(1, 4):
        for solver in ('sag', 'lsqr'):
            model = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
            model.fit(X, Y)
            assert_array_equal(model.n_iter_, np.tile(max_iter, n_targets))

    # Direct solvers have no meaningful iteration count.
    for solver in ('sparse_cg', 'svd', 'cholesky'):
        model = Ridge(solver=solver, max_iter=1, tol=1e-1)
        model.fit(X, Y)
        assert_equal(model.n_iter_, None)
def test_ridge_fit_intercept_sparse():
    """SAG fits with intercept must agree between dense and sparse input."""
    X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
                           bias=10., random_state=42)
    X_csr = sp.csr_matrix(X)

    dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
    sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
    dense.fit(X, y)
    sparse.fit(X_csr, y)
    assert_almost_equal(dense.intercept_, sparse.intercept_)
    assert_array_almost_equal(dense.coef_, sparse.coef_)

    # A solver without sparse-intercept support gets switched internally;
    # the fit must warn but still match the dense result.
    sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
    assert_warns(UserWarning, sparse.fit, X_csr, y)
    assert_almost_equal(dense.intercept_, sparse.intercept_)
    assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
    """_RidgeGCV._errors/_values must agree with _errors_and_values_helper.

    ``_errors`` should return the squared LOO residuals ``(c / G_diag) ** 2``
    and ``_values`` the LOO predictions ``y - c / G_diag``, each together
    with the same ``c`` vector the helper computes.
    """
    ridgecv = _RidgeGCV()
    rng = check_random_state(42)
    alpha = 1.
    n = 5
    y = rng.randn(n)
    v = rng.randn(n)
    Q = rng.randn(len(v), len(v))
    QT_y = Q.T.dot(y)
    G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)

    # test that helper function behaves as expected
    out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
    np.testing.assert_array_equal(out, (c / G_diag) ** 2)
    # Fixed: the original asserted ``assert_array_equal(c, c)``, which is
    # vacuous -- compare the returned c_ against the helper's c instead.
    np.testing.assert_array_equal(c_, c)

    out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
    np.testing.assert_array_equal(out, y - (c / G_diag))
    np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
    """_RidgeGCV._errors_svd/_values_svd must agree with the SVD helper.

    Exercised for both n > p and n < p, since the SVD-based code path
    differs between the two regimes.
    """
    ridgecv = _RidgeGCV()
    rng = check_random_state(42)
    alpha = 1.
    for n, p in zip((5, 10), (12, 6)):
        y = rng.randn(n)
        v = rng.randn(p)
        U = rng.randn(n, p)
        UT_y = U.T.dot(y)
        G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)

        # test that helper function behaves as expected
        out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
        np.testing.assert_array_equal(out, (c / G_diag) ** 2)
        # Fixed: the original asserted ``assert_array_equal(c, c)``, which is
        # vacuous -- compare the returned c_ against the helper's c instead.
        np.testing.assert_array_equal(c_, c)

        out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
        np.testing.assert_array_equal(out, y - (c / G_diag))
        np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
    """RidgeClassifier must reject multilabel targets with a ValueError."""
    X, y = make_multilabel_classification(n_samples=10, random_state=0)
    clf = RidgeClassifier()
    assert_raises(ValueError, clf.fit, X, y)
| bsd-3-clause |
lucidfrontier45/scikit-learn | examples/linear_model/plot_logistic_path.py | 7 | 1170 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print __doc__
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
from datetime import datetime
import numpy as np
import pylab as pl
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print "Computing regularization path ..."
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print "This took ", datetime.now() - start
coefs_ = np.array(coefs_)
pl.plot(np.log10(cs), coefs_)
ymin, ymax = pl.ylim()
pl.xlabel('log(C)')
pl.ylabel('Coefficients')
pl.title('Logistic Regression Path')
pl.axis('tight')
pl.show()
| bsd-3-clause |
tauzero7/Motion4D | python/schwarzschildLightPulse.py | 1 | 1198 | """
Light pulse in Schwarzschild spacetime
"""
import numpy as np
import matplotlib.pyplot as plt
import m4d
obj = m4d.Object()
obj.setMetric("SchwarzschildCart")
obj.setSolver("GSL_RK4")
obj.setSolverParam("eps_a", 1e-8)
obj.setSolverParam("stepctrl", False)

# Integration stops when a geodesic leaves this coordinate box.
boxSize = 20.0
obj.setSolverParam("lower_bb", -1e12, -boxSize, -boxSize, -boxSize)
obj.setSolverParam("upper_bb", 1e12, boxSize, boxSize, boxSize)

# Pulse emitted at x = 10 on the x axis (Cartesian Schwarzschild coords).
obj.setInitialPosition(0.0, 10.0, 0.0, 0.0)

maxPoints = 1000
obj.setParam("maxNumPoints", maxPoints)

# One row per emission angle (0..360 deg inclusive), one slot per affine
# parameter step, 4 coordinates (t, x, y, z -- assumed from the metric;
# TODO confirm against m4d docs).  Pre-filled with NaN so geodesics that
# terminate before maxPoints (e.g. captured or leaving the box) are simply
# skipped by matplotlib instead of plotting uninitialized garbage, which
# the original ``np.ndarray((361, maxPoints, 4))`` could do.
data = np.full((361, maxPoints, 4), np.nan)

for n in range(0, 361):
    print(n)
    alpha = 2.0 * np.pi / 360.0 * n
    # Fire a light ray in the (-cos a, sin a) direction within the x-y plane.
    obj.setInitialLocalNullDirection(1, -np.cos(alpha), np.sin(alpha), 0.0)
    obj.calculateGeodesic()
    num = obj.getNumPoints()
    for i in range(num):
        pos = obj.getPosition(i)
        data[n, i] = [pos[0], pos[1], pos[2], pos[3]]

# Snapshots of the light pulse at several affine-parameter values.
maxLambda = 300
plt.plot(data[:, maxLambda, 1], data[:, maxLambda, 2], 'r.')
maxLambda = 500
plt.plot(data[:, maxLambda, 1], data[:, maxLambda, 2], 'g.')
maxLambda = 700
plt.plot(data[:, maxLambda, 1], data[:, maxLambda, 2], 'b.')
maxLambda = 900
plt.plot(data[:, maxLambda, 1], data[:, maxLambda, 2], 'k*')
plt.show()
#print(data[:,1])
| gpl-3.0 |
brianholland/tiler | tiler.py | 1 | 7835 | """Run like python tiler.py myimage.jpg.
Tiler produces myimage.txt and myimage.txt.png."""
import sys, getopt
import matplotlib as mpl
#http://stackoverflow.com/questions/25561009/how-do-you-i-use-mandarin-charecters-in-matplotlib
#mpl.use("pgf") #I'm not there with Chinese yet.
import matplotlib.pyplot as plt, cStringIO, numpy as np, IPython.display as dis, base64
from PIL import Image
from io import BytesIO
#%matplotlib inline
#Get the pixelated versions of characters.
def charDat(aChar, wgt, fs, **kwargs):
    """Render a single character to a square grayscale pixel array.

    Pass a character, weight ('normal' or 'bold'), and fontsize (pixels);
    get back a 2-D numpy array holding the red channel of the rendered
    glyph.  Can also pass fontname.
    NOTE(review): ``fontname`` is read into a local but never forwarded to
    plt.text -- presumably intended; confirm before relying on it.
    Uses (and clears) matplotlib's implicit current figure.
    """
    plt.clf();#clean up any existing figure state
    plt.cla();
    fontname = kwargs.get('fontname', 'Bitstream Vera Sans')
    fig = plt.gcf(); #The current figure
    mydpi=80.;
    fs = float(fs); #fontsize; figure is fs x fs pixels at mydpi dots/inch
    fig.set_size_inches(fs/mydpi, fs/mydpi);
    plt.axis('off'); #Hide the axes
    #The char wasn't filling the box so I scaled it by eyeballing it
    #(hence the empirical 1.2 factor below).
    plt.text(0,0.0, aChar, fontsize=fs*1.2, weight=wgt);# weight='bold' if you want
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    #Save plot as image encoded as text (base64 PNG).
    sio = cStringIO.StringIO();
    plt.savefig(sio, dpi= mydpi, format="png");
    plt.close();
    s = sio.getvalue().encode('base64').strip();
    #Get the image to be an array of its values (RGB + alpha channel).
    im = Image.open(BytesIO(base64.b64decode(s)));
    dat = list(im.getdata())
    datarr = np.array(dat).reshape(list(im.size)+[4]); #+4 for the 4 elements of the png
    return datarr[:,:,0] #Will be BW, so just take the red channel (0)
def getBase64FromArray(arr):
    """Encode a numpy array as a base64 PNG string.

    Used for checking how arrays look via inline (IPython) display.
    """
    arr = arr.astype('B')
    # 3-D arrays are treated as RGB images, 2-D arrays as grayscale.
    if len(arr.shape) == 3:
        img = Image.fromarray(arr, 'RGB')
    else:
        img = Image.fromarray(arr)
    buff = cStringIO.StringIO()
    img.save(buff, format="PNG")
    encoded = base64.b64encode(buff.getvalue())
    buff.close()
    return encoded
def saveArrToPNG(arr, fn):
    """Save a numpy array as a PNG image to file *fn*.

    Fixed: the file is now opened in binary mode ('wb') -- PNG is binary
    data, and the original text-mode 'w' corrupts it on platforms that
    translate line endings.  The ``with`` block also guarantees the file
    is closed even if ``save`` raises.
    """
    im = Image.fromarray(arr.astype('B'))
    with open(fn, 'wb') as f:
        im.save(f, format='PNG')
class tileSet(object):
    def __init__(self, **kwargs):
        """Build the palette of tiles used to reproduce an image.

        Keyword args:
            characters: list of characters (or a string) to render as
                tiles; default is printable ASCII, chr(32)..chr(126).
            width: tile width in pixels, default 12; tiles are square and
                the font size is derived from it.
        Attributes:
            characterTiles: True while tiles are rendered characters
                (False reserved for a future photo-tile mode).
            characters: the characters backing each tile.
            dim: (height, width) of every tile.
            tile: numpy array of tiles, one rendered glyph per character.
        To do:
            pass images=[fns]: a list of filenames of images (not there yet).
            Make an iterator so t[3] gives t.tile[3].
            Check that imported image tiles are all the same size (the
            rendered characters are forced to be).
        """
        self.characterTiles = True; #Will be false when / if use photos for the tiles.
        self.characters = kwargs.get('characters', [str(chr(c)) for c in range(32,127)]); #default chars.
        self.dim = (kwargs.get('width',12), kwargs.get('width',12)); #Works for characters. Would have to check images.
        self.tile = np.array([charDat(c, 'normal', self.dim[0]) for c in self.characters]).astype('B')
class tiler(object):
    """Match a source image against a tileSet and emit text + PNG output."""

    def __init__(self, fn, tiles, **kwargs):
        """Pass
        1. source filename
        2. tiles to use in making the picture: a tileSet object
        3. optional named args:
           transpose=True or False (default) to transpose the image
           power = 0 to white out, 1 to leave alone, >1 to make darker
               (up to +infinity to black at the limit)
        Sets dataOriginal (raw pixels, with transpose applied) and
        dataTarget (grayscale, brightness-adjusted) for matchTiles().
        """
        self.fn = fn;
        self.tiles = tiles;
        #Read in the data from the source image
        im = Image.open(fn);
        data = np.array(list(im.getdata()));
        data = data.reshape([im.size[1], im.size[0], len(data[0])]);
        im.close();
        if kwargs.get('transpose',False):
            data = np.transpose(data, (1,0,2)); #Some test images needed transposing, dunno why
        self.dataOriginal = data;
        #Force black and white target. Fix later when have color tiles.
        #Exponentiate the fraction of brightness: power in [0,1) brightens,
        #1 leaves alone, (1, inf) darkens.
        self.dataTarget = 255 * ((np.mean(data, -1) * 1. / 255.) ** kwargs.get('power', 1.0) )

    def matchTiles(self):
        """Match the tiles to the portions of the image. Set:
        matches: an array of the indices of the tiles that matched
        dataText: the numerical data of the image, which can then be written out.
        NOTE(review): the ``/`` divisions below rely on Python 2 integer
        division; running under Python 3 would break the indexing.
        """
        dim = self.tiles.dim;
        #Check each block.
        targdat = self.dataTarget
        newdat = targdat.copy(); #Will overwrite all this, tile by tile
        #The numbers of the tiles that matched to each place:
        matches = np.zeros((1+targdat.shape[0]/dim[0], 1+targdat.shape[1]/dim[1])); #The tiles I match
        for i0 in range(0, targdat.shape[0], dim[0]):
            for i1 in range(0, targdat.shape[1], dim[1]):
                ablk = newdat[i0:i0+dim[0], i1:i1+dim[1]]; # A block in the target data.
                #Pick the tile closest to this block.
                n = ablk.shape; #For the last row, column that will get truncated. Need to know size.
                #dist is the distance from the letter to the image, for each letter in the palette.
                #Not a great use of the numpy lin alg features here, but works.
                dist = list(np.array([np.linalg.norm((ablk - cod[:n[0], :n[1]]).flatten())
                              for cod in self.tiles.tile]));
                j = dist.index(min(dist)) #The position of the (first) best match.
                # Fix: @ random in prop to match quality so don't get big patches of same letter.
                matches[i0/dim[0],i1/dim[1]] = j; #Save the one that matched.
                #Put that letter in, only up to the lower or rightmost edge.
                newdat[i0:i0+n[0], i1:i1+n[1]] = self.tiles.tile[j, :n[0], :n[1]];
        self.matches = matches;
        self.dataText = newdat;

    def show(self):
        """Return IPython HTML showing original, target and text images side by side."""
        return dis.HTML('<table><tr>%s</tr></table>'%'\n'.join(['<td><img src="data:image/png;base64,%s"></img></td>'%getBase64FromArray(d)
                            for d in [self.dataOriginal, self.dataTarget, self.dataText]]))

    def save(self):
        """Write the matched characters to <base>.txt and the image to <base>.txt.png."""
        outfn = self.fn; #assume no dot. Wrong probably but doesn't hurt.
        if '.' in outfn:
            outfn = '.'.join(outfn.split('.')[:-1]) #Strip extension
        outfn += '.txt'
        #Save the characters, if there are characters.
        if self.tiles.characterTiles:
            txt = '\n'.join([''.join([self.tiles.characters[int(c)] for c in r]) for r in self.matches])
            f = open(outfn, 'w');
            f.write(txt);
            f.close();
        #Save the image.
        saveArrToPNG(self.dataText, outfn+'.png');
# For below see http://www.artima.com/weblogs/viewpost.jsp?thread=4829
def main():
    """Command-line entry point: tile the image named by the first argument.

    NOTE(review): Python 2 syntax (``except getopt.error, msg`` and print
    statements); this module cannot run under Python 3 as written.
    """
    # parse command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        if o in ("-h", "--help"):
            print __doc__
            sys.exit(0)
    # process arguments: first positional arg is the source image filename
    fn = args[0];
    ts = tileSet() #just use defaults
    t = tiler(fn, ts)
    t.matchTiles();
    t.save();
if __name__ == "__main__":
    main()
| mit |
ZENGXH/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]  # fractions of data held out for testing
rounds = 20  # random splits averaged per held-out fraction
digits = datasets.load_digits()
X, y = digits.data, digits.target
# (label, estimator) pairs being compared.
classifiers = [
    ("SGD", SGDClassifier()),
    ("ASGD", SGDClassifier(average=True)),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
]
xx = 1. - np.array(heldout)  # proportion of data used for training
for name, clf in classifiers:
    # Fixed seed so every classifier sees the same sequence of splits.
    rng = np.random.RandomState(42)
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            X_train, X_test, y_train, y_test = \
                train_test_split(X, y, test_size=i, random_state=rng)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            yy_.append(1 - np.mean(y_pred == y_test))  # error rate of this split
        yy.append(np.mean(yy_))  # mean error over the rounds
    plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
cxxgtxy/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
    """Tests for ``pandas_io.pandas_input_fn``.

    Every test returns early (silent skip) when pandas is unavailable,
    mirroring the module-level HAS_PANDAS import guard.
    """

    def makeTestDataFrame(self):
        """Return (x, y): a 4-row DataFrame and a Series sharing its index."""
        index = np.arange(100, 104)
        a = np.arange(4)
        b = np.arange(32, 36)
        x = pd.DataFrame({'a': a, 'b': b}, index=index)
        y = pd.Series(np.arange(-32, -28), index=index)
        return x, y

    def callInputFnOnce(self, input_fn, session):
        """Run input_fn for one step under queue runners and return its values."""
        results = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        result_values = session.run(results)
        coord.request_stop()
        coord.join(threads)
        return result_values

    def testPandasInputFn_IndexMismatch(self):
        """x and y with mismatched indices must raise ValueError."""
        if not HAS_PANDAS:
            return
        x, _ = self.makeTestDataFrame()
        y_noindex = pd.Series(np.arange(-32, -28))
        with self.assertRaises(ValueError):
            pandas_io.pandas_input_fn(
                x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)

    def testPandasInputFn_ProducesExpectedOutputs(self):
        """An unshuffled batch yields columns and targets in frame order."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)

            features, target = self.callInputFnOnce(input_fn, session)

            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])
            self.assertAllEqual(target, [-32, -31])

    def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
        """A batch larger than the data spans epochs, then raises OutOfRange."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            index = np.arange(100, 102)
            a = np.arange(2)
            b = np.arange(32, 34)
            x = pd.DataFrame({'a': a, 'b': b}, index=index)
            y = pd.Series(np.arange(-32, -30), index=index)
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=128, shuffle=False, num_epochs=2)

            results = input_fn()

            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(session, coord=coord)

            # 2 rows x 2 epochs arrive in a single oversized batch.
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [0, 1, 0, 1])
            self.assertAllEqual(features['b'], [32, 33, 32, 33])
            self.assertAllEqual(target, [-32, -31, -32, -31])

            with self.assertRaises(errors.OutOfRangeError):
                session.run(results)

            coord.request_stop()
            coord.join(threads)

    def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
        """5 rows with batch_size=2 yield batches of 2, 2, then 1."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            index = np.arange(100, 105)
            a = np.arange(5)
            b = np.arange(32, 37)
            x = pd.DataFrame({'a': a, 'b': b}, index=index)
            y = pd.Series(np.arange(-32, -27), index=index)

            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)

            results = input_fn()

            coord = coordinator.Coordinator()
            threads = queue_runner_impl.start_queue_runners(session, coord=coord)

            features, target = session.run(results)
            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])
            self.assertAllEqual(target, [-32, -31])

            features, target = session.run(results)
            self.assertAllEqual(features['a'], [2, 3])
            self.assertAllEqual(features['b'], [34, 35])
            self.assertAllEqual(target, [-30, -29])

            # Final, short batch holds the one leftover row.
            features, target = session.run(results)
            self.assertAllEqual(features['a'], [4])
            self.assertAllEqual(features['b'], [36])
            self.assertAllEqual(target, [-28])

            with self.assertRaises(errors.OutOfRangeError):
                session.run(results)

            coord.request_stop()
            coord.join(threads)

    def testPandasInputFn_OnlyX(self):
        """y=None produces features only (no target tensor)."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, _ = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y=None, batch_size=2, shuffle=False, num_epochs=1)

            features = self.callInputFnOnce(input_fn, session)

            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])

    def testPandasInputFn_ExcludesIndex(self):
        """The DataFrame index must not leak into the feature dict."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)
            features, _ = self.callInputFnOnce(input_fn, session)

            self.assertFalse('index' in features)

    def assertInputsCallableNTimes(self, input_fn, session, n):
        """Assert input_fn yields exactly n batches, then raises OutOfRange."""
        inputs = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        for _ in range(n):
            session.run(inputs)
        with self.assertRaises(errors.OutOfRangeError):
            session.run(inputs)
        coord.request_stop()
        coord.join(threads)

    def testPandasInputFn_RespectsEpoch_NoShuffle(self):
        """4 rows, batch_size=4, 1 epoch => exactly one batch."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=False, num_epochs=1)

            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffle(self):
        """Shuffling must not change the number of batches per epoch."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=True, num_epochs=1)

            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
        """queue_capacity=None (auto) still yields 4 batches for 2 epochs."""
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)

            self.assertInputsCallableNTimes(input_fn, session, 4)

    def testPandasInputFn_RespectsEpochUnevenBatches(self):
        """4 rows, batch_size=3 => two batches (3 + 1) per epoch."""
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        with self.test_session() as session:
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=3, shuffle=False, num_epochs=1)

            # Before the last batch, only one element of the epoch should remain.
            self.assertInputsCallableNTimes(input_fn, session, 2)

    def testPandasInputFn_Idempotent(self):
        """Building the same input_fn twice must not raise or interfere."""
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, num_epochs=1)()
# Run the TensorFlow test runner when executed directly.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
elingg/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 6 | 11427 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train():  # returns x, y
  ...
def input_fn_eval():  # returns x, y
  ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train():  # returns x, Y
  ...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval():  # returns x, Y
  ...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.