repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
loli/sklearn-ensembletrees | sklearn/metrics/tests/test_score_objects.py | 5 | 8592 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import make_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
class EstimatorWithoutFit(object):
    """Dummy estimator with no ``fit``; check_scoring must reject it as not
    implementing the estimator interface."""
    pass
class EstimatorWithFit(object):
    """Dummy estimator exposing only ``fit`` (no ``score``/``predict``);
    check_scoring must fail for it unless allow_none=True."""
    def fit(self, X, y):
        # No-op fit; returns self per the scikit-learn estimator contract.
        return self
class EstimatorWithFitAndScore(object):
    """Dummy estimator with ``fit`` and a ``score`` that always returns 1.0,
    used to exercise check_scoring's pass-through scorer branch."""
    def fit(self, X, y):
        return self
    def score(self, X, y):
        # Constant score so the test can assert the exact value.
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Dummy estimator with ``fit`` and ``predict`` but no ``score``.

    ``predict`` simply echoes back the y memorized during fit, so any
    string-named scorer evaluates to a perfect score on the training data.
    """
    def fit(self, X, y):
        self.y = y
        return self
    def predict(self, X):
        return self.y
def test_check_scoring():
    """Test all branches of check_scoring"""
    # No fit method at all -> TypeError from check_scoring.
    estimator = EstimatorWithoutFit()
    # NOTE(review): "should a be an" reproduces the (sic) wording of the
    # actual scikit-learn error message being matched.
    pattern = (r"estimator should a be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    # fit + score and no scoring given -> scorer delegating to estimator.score.
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    # fit + predict but no score method, no scoring given -> TypeError.
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    # With an explicit scoring string, having predict is enough.
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    # fit only (neither score nor predict) -> TypeError even with scoring.
    estimator = EstimatorWithFit()
    pattern = (r"The estimator passed should have a 'score'"
               r" or a 'predict' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator,
                         "accuracy")
    # allow_none=True suppresses the error and yields a None scorer.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_make_scorer():
    """Sanity check on the make_scorer factory function."""
    # needs_threshold and needs_proba are mutually exclusive: a metric is fed
    # either decision_function output or predict_proba output, never both.
    # A named def replaces the original `f = lambda ...` assignment (PEP 8
    # E731); behavior is identical.
    def dummy_metric(*args):
        return 0

    assert_raises(ValueError, make_scorer, dummy_metric, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    """Test classification scorers."""
    X, y = make_blobs(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    # The 'f1' scorer must match f1_score computed on hard predictions.
    score1 = SCORERS['f1'](clf, X_test, y_test)
    score2 = f1_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr:
    # NOTE(review): this reprs the fbeta_score *metric function*, not the
    # scorer object built above -- repr(scorer) was probably intended.
    repr(fbeta_score)
def test_regression_scorers():
    """The 'r2' scorer must agree with calling r2_score directly."""
    diabetes = load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=0)
    reg = Ridge()
    reg.fit(X_train, y_train)
    expected = r2_score(y_test, reg.predict(X_test))
    actual = SCORERS['r2'](reg, X_test, y_test)
    assert_almost_equal(actual, expected)
def test_thresholded_scorers():
    """Test scorers that take thresholds."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    # For binary problems the roc_auc scorer must agree whether scores come
    # from decision_function or the positive-class predict_proba column.
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    # log_loss is a loss, so the scorer returns its negation.
    logscore = SCORERS['log_loss'](clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    """Test that the scorer work with multilabel-indicator format
    for multilabel and multi-output multi-class classifier
    """
    X, y = make_multilabel_classification(return_indicator=True,
                                          allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    # Pass a list to np.vstack: feeding it a bare generator was deprecated in
    # NumPy 1.16 and raises a TypeError on recent NumPy releases.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    # decision_function already returns a list of 1-D arrays; stack directly
    # instead of via a generator (same NumPy issue as above).
    score2 = roc_auc_score(y_test, np.vstack(y_proba).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    """Test clustering scorers against gold standard labeling."""
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    # The scorer must reproduce adjusted_rand_score on predicted cluster ids
    # versus the held-out true labels.
    score1 = SCORERS['adjusted_rand_score'](km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    """Test that when a list of scores is returned, we raise proper errors."""
    X, y = make_blobs(random_state=0)
    # average=None makes f1_score return one value per class (an array),
    # which cross-validation cannot aggregate into a single number.
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    # GridSearchCV must fail the same way when given such a scorer.
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
| bsd-3-clause |
assad2012/ggplot | ggplot/tests/test_basic.py | 12 | 9308 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
from ggplot.exampledata import diamonds
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)
})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
df['z'] = df['y'] + 100
df['c'] = np.where(df.x%2==0,"red", "blue")
return df
def _build_meat_df():
    """Return the example ``meat`` dataset with its date column parsed.

    Works on a copy so that the module-level ``meat`` DataFrame (imported
    from ggplot's example data) is not mutated as a side effect of one test
    running before another.
    """
    df = meat.copy()
    df['date'] = pd.to_datetime(df.date)
    return df
@cleanup
def test_geom_density():
df = _build_testing_df()
gg = ggplot(aes(x="x", color="c"), data=df)
gg = gg + geom_density() + xlab("x label") + ylab("y label")
assert_same_ggplot(gg, "geom_density")
@cleanup
def test_geom_histogram():
df = _build_testing_df()
# TODO: use fill aesthetic for a better test
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_histogram(), "geom_hist")
assert_same_ggplot(gg + geom_histogram() + ggtitle("My Histogram"), "geom_hist_title")
@cleanup
def test_geom_point():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df)
assert_same_ggplot(gg + geom_point(), "geom_point")
gg = gg + geom_point() + geom_vline(xintercept=50, ymin=-1.5, ymax=1.5)
assert_same_ggplot(gg, "geom_point_vline")
@cleanup
def test_geom_area():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z', color="cat2"), data=df)
assert_same_ggplot(gg + geom_area(), "geom_area")
@cleanup
def test_geom_text():
gg = ggplot(aes(x='wt',y='mpg',label='name'),data=mtcars) + geom_text()
assert_same_ggplot(gg, "geom_text")
@cleanup
def test_geom_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line(), "factor_geom_line")
@cleanup
def test_geom_rect():
    """Rectangles from per-row corner aesthetics plus fixed-value overrides."""
    df = pd.DataFrame({
        'xmin':[3, 5, 3, 3, 9, 4, 8, 3, 9, 2, 9, 1, 11, 4, 7, 1],
        'xmax':[10, 8, 10, 4, 10, 5, 9, 4, 10, 4, 11, 2, 12, 6, 9, 12],
        'ymin':[3, 3, 6, 2, 2, 6, 6, 8, 8, 4, 4, 2, 2, 1, 1, 4],
        'ymax':[5, 7, 7, 7, 7, 8, 8, 9, 9, 6, 6, 5, 5, 2, 2, 5]})
    p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax'))
    # One background rectangle with hard-coded extents, then the data-driven
    # rectangles drawn in white on top of it.
    p += geom_rect(xmin=0, xmax=13, ymin=0, ymax=10)
    p += geom_rect(colour="white", fill="white")
    p += xlim(0, 13)
    assert_same_ggplot(p, "geom_rect_inv")
@cleanup
def test_factor_geom_point():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_point(), "factor_geom_point")
@cleanup
def test_factor_geom_point_line():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
assert_same_ggplot(p + geom_line() + geom_point(), "factor_geom_point_line")
@cleanup
def test_factor_point_line_title_lab():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue') + ggtitle("Beef: It's What's for Dinner")
p = p + xlab("Date") + ylab("Head of Cattle Slaughtered")
assert_same_ggplot(p, "factor_complicated")
@cleanup
def test_labs():
p = ggplot(mtcars, aes(x='wt', y='mpg', colour='factor(cyl)', size='mpg', linetype='factor(cyl)'))
p = p + geom_point() + geom_line(color='lightblue')
p = p + labs(title="Beef: It's What's for Dinner", x="Date", y="Head of Cattle Slaughtered")
assert_same_ggplot(p, "labs")
@cleanup
def test_factor_bar():
p = ggplot(aes(x='factor(cyl)'), data=mtcars)
assert_same_ggplot(p + geom_histogram(), "factor_geom_bar")
@cleanup
def test_stats_smooth():
df = _build_testing_df()
gg = ggplot(aes(x="x", y="y", color="cat"), data=df)
gg = gg + stat_smooth(se=False) + ggtitle("My Smoothed Chart")
assert_same_ggplot(gg, "stat_smooth")
@cleanup
def test_stats_bin2d():
import matplotlib.pyplot as plt
if not hasattr(plt, "hist2d"):
import nose
raise nose.SkipTest("stat_bin2d only works with newer matplotlib (1.3) versions.")
df = _build_testing_df()
gg = ggplot(aes(x='x', y='y', shape='cat', color='cat2'), data=df)
assert_same_ggplot(gg + stat_bin2d(), "stat_bin2d")
@cleanup
def test_alpha_density():
gg = ggplot(aes(x='mpg'), data=mtcars)
assert_same_ggplot(gg + geom_density(fill=True, alpha=0.3), "geom_density_alpha")
@cleanup
def test_facet_wrap():
df = _build_testing_df()
gg = ggplot(aes(x='x', ymax='y', ymin='z'), data=df)
#assert_same_ggplot(gg + geom_bar() + facet_wrap(x="cat2"), "geom_bar_facet")
assert_same_ggplot(gg + geom_area() + facet_wrap(x="cat2"), "geom_area_facet")
@cleanup
def test_facet_wrap2():
meat = _build_meat_df()
meat_lng = pd.melt(meat, id_vars=['date'])
p = ggplot(aes(x='date', y='value', colour='variable'), data=meat_lng)
assert_same_ggplot(p + geom_density(fill=True, alpha=0.3) + facet_wrap("variable"), "geom_density_facet")
assert_same_ggplot(p + geom_line(alpha=0.3) + facet_wrap("variable"), "geom_line_facet")
@cleanup
def test_facet_grid_exceptions():
    """facet_grid must reject missing or unknown facet variables."""
    meat = _build_meat_df()
    meat_lng = pd.melt(meat, id_vars=['date'])
    p = ggplot(aes(x="date", y="value", colour="variable", shape="variable"), meat_lng)
    # y alone is not enough for a grid.
    with assert_raises(Exception):
        print(p + geom_point() + facet_grid(y="variable"))
    # A facet variable that is not a dataframe column must raise, on either axis.
    with assert_raises(Exception):
        print(p + geom_point() + facet_grid(y="variable", x="NOT_AVAILABLE"))
    with assert_raises(Exception):
        print(p + geom_point() + facet_grid(y="NOT_AVAILABLE", x="variable"))
@cleanup
def test_facet_grid():
# only use a small subset of the data to speedup tests
# N=53940 -> N=7916 and only 2x2 facets
_mask1 = (diamonds.cut == "Ideal") | (diamonds.cut == "Good")
_mask2 = (diamonds.clarity == "SI2") | (diamonds.clarity == "VS1")
_df = diamonds[_mask1 & _mask2]
p = ggplot(aes(x='x', y='y', colour='z'), data=_df)
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_big")
p = ggplot(aes(x='carat'), data=_df)
p = p + geom_density() + facet_grid("cut", "clarity")
assert_same_ggplot(p, "diamonds_facet")
@cleanup
def test_smooth_se():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + stat_smooth(), "point_smooth_se")
assert_same_ggplot(p + stat_smooth(), "smooth_se")
@cleanup
def test_scale_xy_continous():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
p = p + geom_point() + scale_x_continuous("This is the X")
p = p + scale_y_continuous("Squared", limits=[0, 1500])
assert_same_ggplot(p, "scale1")
@cleanup
def test_ylim():
meat = _build_meat_df()
p = ggplot(aes(x='date', y='beef'), data=meat)
assert_same_ggplot(p + geom_point() + ylim(0, 1500), "ylim")
@cleanup
def test_partial_limits() :
p = ggplot(diamonds, aes('carat', 'price'))
assert_same_ggplot(p + geom_point(alpha=1/20.) + xlim(high = 4) + ylim(0), "partial_limits")
@cleanup
def test_partial_limits_facet() :
p = ggplot(diamonds, aes('carat', 'price', color="clarity"))
p = p + geom_point(alpha=1/20.) + facet_wrap(x="cut", scales="free") + xlim(low=0) + ylim(low=0)
assert_same_ggplot(p, "partial_limits_facet")
@cleanup
def test_scale_date():
meat = _build_meat_df()
gg = ggplot(aes(x='date', y='beef'), data=meat) + geom_line()
assert_same_ggplot(gg+scale_x_date(labels="%Y-%m-%d"), "scale_date")
@cleanup
def test_diamond():
p = ggplot(aes(x='x', y='y', colour='z'), data=diamonds.head(4))
p = p + geom_point() + scale_colour_gradient(low="white", high="red")
p = p + facet_wrap("cut")
assert_same_ggplot(p, "diamonds_small")
def test_aes_positional_args():
    """Positional aes() arguments map to x, y, then color, in that order."""
    result = aes("weight", "hp")
    expected = {"x": "weight", "y": "hp"}
    assert_equal(result, expected)
    result3 = aes("weight", "hp", "qsec")
    expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
    assert_equal(result3, expected3)
def test_aes_keyword_args():
    """Keyword aes() arguments are stored verbatim under their own names."""
    result = aes(x="weight", y="hp")
    expected = {"x": "weight", "y": "hp"}
    assert_equal(result, expected)
    result3 = aes(x="weight", y="hp", color="qsec")
    expected3 = {"x": "weight", "y": "hp", "color": "qsec"}
    assert_equal(result3,expected3)
def test_aes_mixed_args():
    """Positional and keyword aes() arguments can be combined."""
    result = aes("weight", "hp", color="qsec")
    expected = {"x": "weight", "y": "hp", "color": "qsec"}
    assert_equal(result, expected)
@cleanup
def test_scale_color_brewer() :
p = ggplot(diamonds, aes(x = "x", y="y"))
p = p + geom_line() + scale_color_brewer(type='qual', palette=2)
assert_same_ggplot(p, "scale_color_brewer")
| bsd-2-clause |
kaichogami/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """Check _hungarian against several cost matrices with known optima."""
    # Each entry pairs a cost matrix with its optimal assignment cost.
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850),
        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452),
        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18),
        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15),
        # n == 2, m == 0 matrix
        ([[], []],
         0),
    ]
    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        # The optimum must be found both on the matrix and on its transpose
        # (the solver returns (row, col) pairs; transposed input swaps them).
        indexes = _hungarian(cost_matrix)
        assert expected_total == sum(cost_matrix[r, c] for r, c in indexes)
        indexes = _hungarian(cost_matrix.T)
        assert expected_total == sum(cost_matrix[r, c] for c, r in indexes)
| bsd-3-clause |
elin-moco/metrics | metrics/mocotw/views.py | 1 | 2772 | """Example views. Feel free to delete this app."""
import json
import logging
from django.http import HttpResponse
import pandas as pd
import numpy as np
from django.shortcuts import render
import commonware
from django.template.loader import render_to_string
from datetime import datetime
log = commonware.log.getLogger('playdoh')
def fx_download(request):
    """Render the Firefox download dashboard page (template-only view)."""
    data = {}  # You'd add data here that you're sending to the template.
    return render(request, 'mocotw/fx_download.html', data)
def fx_download_sum_data(request):
    """Return download pageview totals (pageviews > 200) as a JSON list."""
    df_sum = pd.read_hdf('mocotw.h5', 'fx_download_sum')
    # itertuples() yields (index, pageviews) rows; the index is the page path.
    # NOTE(review): assumes df_sum has exactly one data column -- confirm.
    # pageviews is stringified to preserve the original JSON payload shape.
    arr_sum = [{'pagePath': pagePath, 'pageviews': str(pageviews)}
               for pagePath, pageviews in df_sum[df_sum.pageviews > 200].itertuples()]
    return HttpResponse(json.dumps(arr_sum), mimetype='application/json')
    # return HttpResponse(render_to_string('dashboard/data.json'), mimetype='application/json')
def fx_download_stack_data(request):
    """Return the download stacked-chart table as a JSON list of row dicts."""
    df_stack = pd.read_hdf('mocotw.h5', 'fx_download_stack')
    arr_stack = []
    cols = df_stack.columns.tolist()
    for index, row in df_stack.iterrows():
        stack = {}
        for col in cols:
            val = row[col]
            # Date-like cells become 'YYYY-MM-DD' strings, everything else an
            # int. `pd.datetime` was a deprecated alias of datetime.datetime
            # and was removed in pandas >= 2.0; test against the real class
            # (imported at module level) -- pandas Timestamps subclass it.
            if not isinstance(val, datetime):
                stack[col] = int(val)
            else:
                stack[col] = val.strftime('%Y-%m-%d')
        arr_stack += [stack]
    return HttpResponse(json.dumps(arr_stack), mimetype='application/json')
def moztech_billboard(request):
    """Render the moztech billboard: per-post pageviews and FB share rate."""
    df_posts = pd.read_hdf('moztech.h5', 'posts')
    # Shares per pageview; cast to float for true division under Python 2.
    df_posts['fbSharesRate'] = df_posts['fbShares'].astype(float)/df_posts['pageviews']
    data = {'posts': df_posts.transpose().to_dict(), 'now': datetime.now()}
    return render(request, 'mocotw/moztech_billboard.html', data)
def mozblog_billboard(request):
    """Render the mozblog billboard; mirrors moztech_billboard but reads
    mozblog.h5 and uses its own template."""
    df_posts = pd.read_hdf('mozblog.h5', 'posts')
    # Shares per pageview; cast to float for true division under Python 2.
    df_posts['fbSharesRate'] = df_posts['fbShares'].astype(float)/df_posts['pageviews']
    data = {'posts': df_posts.transpose().to_dict(), 'now': datetime.now()}
    return render(request, 'mocotw/mozblog_billboard.html', data)
def newsletter_views(request):
    """Render newsletter pageview stats joined with per-issue referrers."""
    df_posts = pd.read_hdf('newsletter.h5', 'main')
    df_refers = pd.read_hdf('newsletter.h5', 'refers')
    dic = df_posts.transpose().to_dict()
    issues = {}
    for path, issue in dic.items():
        # path[11:] strips a fixed-length URL prefix to recover the campaign
        # id. NOTE(review): assumes every path starts with an 11-character
        # prefix -- confirm against the stored data.
        issue['refers'] = df_refers[df_refers['campaign'] == path[11:]].transpose().to_dict()
        issues[path] = issue
    data = {'issues': issues}
    return render(request, 'mocotw/newsletter_views.html', data)
def data(request):
    """Serve the pre-rendered dashboard TSV.

    NOTE(review): the payload is TSV but the response is labelled
    application/json -- confirm whether the consumer depends on this.
    """
    return HttpResponse(render_to_string('dashboard/data.tsv'), mimetype='application/json')
| bsd-3-clause |
pv/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# we create 20 points: the first 10 centred on (1, 1), the last 10 on (0, 0)
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# assign a bigger weight to the FIRST 10 samples (the +1 class); the original
# comment said "last 10", but [:10] selects the first ten rows
sample_weight[:10] *= 10

# plot the weighted data points: marker size encodes each sample's weight
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone)

## fit the unweighted model
# NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn >= 0.19 and
# removed in 0.21 -- this example targets the older API.
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# solid contour: decision boundary (level 0) of the unweighted fit
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])

## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# dashed contour: the boundary shifts once sample weights are applied
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])

plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")

plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
srio/shadow3-scripts | COMSOL/test1.py | 1 | 2706 |
# https://numerical-analysis.readthedocs.io/en/latest/Interpolation/2D_Interpolation.html
# Setup
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
params = {'font.size' : 14,
'figure.figsize':(15.0, 8.0),
'lines.linewidth': 2.,
'lines.markersize': 15,}
matplotlib.rcParams.update(params)
Ni = 40
Pi = np.random.rand(Ni, 2)
print(">>>>Pi",Pi.shape)
Xi, Yi = Pi[:,0], Pi[:,1]
Zi = np.random.rand(Ni)
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on old matplotlib
import numpy as np
import matplotlib.pyplot as plt

# Scatter the random sample points in 3D.
# `Figure.gca(projection='3d')` was deprecated in matplotlib 3.4 and removed
# in 3.6; add_subplot(projection='3d') is the supported replacement.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(Xi, Yi, Zi, "or", label='Data')
ax.legend()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
# triangulation
from scipy.spatial import Delaunay
Pi = np.array([Xi, Yi]).transpose()
tri = Delaunay(Pi)
plt.triplot(Xi, Yi , tri.simplices.copy())
plt.plot(Xi, Yi, "or", label = "Data")
plt.grid()
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# interpolation
N = 100
x = np.linspace(0., 1., N)
y = np.linspace(0., 1., N)
X, Y = np.meshgrid(x, y)
P = np.array([X.flatten(), Y.flatten() ]).transpose()
plt.plot(Xi, Yi, "or", label = "Data")
plt.triplot(Xi, Yi , tri.simplices.copy())
plt.plot(X.flatten(), Y.flatten(), "g,", label = "Z = ?")
plt.legend()
plt.grid()
plt.show()
# nearest interpolation
from scipy.interpolate import griddata
Z_nearest = griddata(Pi, Zi, P, method = "nearest").reshape([N, N])
plt.contourf(X, Y, Z_nearest, 50)
plt.plot(Xi, Yi, "or", label = "Data")
plt.colorbar()
plt.legend()
plt.grid()
plt.show()
# linear interpolation
from scipy.interpolate import griddata
Z_linear = griddata(Pi, Zi, P, method = "linear").reshape([N, N])
plt.contourf(X, Y, Z_linear, 50, cmap = mpl.cm.jet)
plt.colorbar()
plt.contour(X, Y, Z_linear, 10, colors = "k")
#plt.triplot(Xi, Yi , tri.simplices.copy(), color = "k")
plt.plot(Xi, Yi, "or", label = "Data")
plt.legend()
plt.grid()
plt.show()
# higher order interpolation
from scipy.interpolate import griddata
Z_cubic = griddata(Pi, Zi, P, method = "cubic").reshape([N, N])
plt.contourf(X, Y, Z_cubic, 50, cmap = mpl.cm.jet)
plt.colorbar()
plt.contour(X, Y, Z_cubic, 20, colors = "k")
#plt.triplot(Xi, Yi , tri.simplices.copy(), color = "k")
plt.plot(Xi, Yi, "or", label = "Data")
plt.legend()
plt.grid()
plt.show()
# comparison/discussion: the three interpolants side by side on a common scale
levels = np.linspace(0., 1., 50)
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
plt.contourf(X, Y, Z_nearest, levels)
plt.grid()
ax = fig.add_subplot(1, 3, 2)
plt.contourf(X, Y, Z_linear, levels)
plt.grid()
ax = fig.add_subplot(1, 3, 3)
plt.contourf(X, Y, Z_cubic, levels)
plt.grid()
# Bug fix: the original ended with a bare `plt.plot` attribute reference,
# a no-op that never displayed the figure; every other section of this
# script ends with plt.show(), so do the same here.
plt.show()
| mit |
CodeFanatic23/newsBehavior | quick_scraper.py | 1 | 3846 | import scrapy
from bs4 import BeautifulSoup
import sys
import os
import _pickle as pickle
import pandas as pd
from .scrape_with_bs4 import *
import datetime
class ContentSpider(scrapy.Spider):
    """Scrapy spider that downloads archived news articles for one company.

    Reads ``date::url`` lines from links/finallinks/<file>, dispatches each
    URL, extracts article text with a site-specific BeautifulSoup parser,
    and dumps the results (pickle + CSV) under content/<company>/<site>/.
    """
    name = "yolo"
    # Treat every non-200 status as handled so errored responses still reach
    # parse() (and are counted) instead of being dropped by middleware.
    handle_httpstatus_list = [i for i in range(100,999) if i!=200]
    BASE_PATH = os.path.dirname(os.path.abspath(__file__))
    date_=None
    file=None
    url=None
    total_urls=0
    counter=0
    # NOTE(review): these are *class-level* mutable containers, shared by all
    # instances in the process -- confirm only one spider runs at a time.
    b={}
    date={}
    contents={}
    total_links={}
    # Maps a hostname fragment to its site-specific scraping function.
    NEWS={'reuters.com':sc_reuters,'thehindu.com':sc_thehindu,'economictimes.indiatimes':sc_econt,
          'moneycontrol.com':moneyControl,'ndtv.com':ndtv,'hindubusinessline.com':hindu_bl}
    # Initialise one empty bucket per news site.
    for key in NEWS:
        date[key]=[]
        b[key]={}
        contents[key]=[]
        total_links[key]=[]
    # generates all the links to be scraped
    def start_requests(self):
        # Interactive prompt: pick the company whose stored links to scrape.
        print("\n\nEnter company name to scrape content for")
        cos=[i.split('_')[1] for i in list_files('links/finallinks')]
        print('\n'+str(cos))
        self.dest_file=input()
        for file_name in list_files('links/finallinks'):
            if(self.dest_file.lower() in file_name.lower()):
                tracker(file_name)
                print("SCRAPING DATA FOR "+file_name)
                # Each stored line has the form "<date>::<url>".
                links = [line.rstrip('\n') for line in open('links/finallinks/'+file_name)]
                self.total_urls=len(links)
                self.file=file_name
                for l in links:
                    self.date_,self.url=l.split('::')
                    # dont_filter: the same URL may legitimately recur.
                    request=scrapy.Request(self.url,self.parse,dont_filter=True)
                    request.meta['date']=self.date_
                    yield request
    # gets called at the end when all the data has been scraped .
    # It maintains the same folder format for data storage as before.
    def writeTo(self):
        company=self.dest_file
        for webp in self.date:
            make_directory(company,webp)
            # Raw {date: text} mapping for this site, pickled as-is.
            with open('content/'+company+'/'+webp+'/raw_'+self.file.split('.data')[0]+'_'+webp+'.pkl', 'wb') as fp:
                pickle.dump(self.b[webp], fp)
            temp = {'date':self.date[webp],
                    'data':self.contents[webp],
                    'url':self.total_links[webp]
                    }
            # Tidy per-site frame indexed by date, saved as pickle and CSV.
            df = pd.DataFrame(temp)
            df.set_index('date',inplace=True)
            df.to_pickle('content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.pkl')
            df.to_csv('content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.csv')
    def parse(self, response):
        # Every response (success or error) bumps the counter so the
        # completion check below eventually fires.
        if(response.status in self.handle_httpstatus_list):
            self.counter+=1
        else:
            self.counter+=1
            for key in self.NEWS:
                if key in response.url:
                    # Delegate extraction to the site-specific parser, then
                    # flatten its nested token lists into one string.
                    bs=BeautifulSoup(response.text,'html.parser')
                    content=self.NEWS[key](bs)
                    str1=''
                    tokens=[]
                    for text in content:
                        tokens.extend(text)
                    for tk in tokens:
                        str1+=''.join(tk)
                    c = datetime.datetime.strptime(response.meta['date'], '%d-%b-%Y')
                    #yield self.logger.info("date -"+str(c)+" #"*15)
                    self.date[key].append(c)
                    self.contents[key].append(str1)
                    self.total_links[key].append(response.url)
                    temp_={c:str1}
                    self.b[key].update(temp_)
        yield self.logger.info("COUNTER -"+str(self.counter)+" #"*15)
        yield self.logger.info("TOTAL URLS -"+str(self.total_urls)+" #"*12)
        # Once every scheduled URL has been seen, flush results to disk.
        if(self.counter==self.total_urls):
            self.writeTo()
| mit |
gnieboer/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
  """Create a dataframe populated with some test columns.

  Columns "a" and "c" select transform output "out1" (so both build to the
  same mock tensor) while "b" selects "out2".
  """
  df = learn.DataFrame()
  df["a"] = learn.TransformedSeries(
      [mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
      mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
  df["b"] = learn.TransformedSeries(
      [mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
      mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
  df["c"] = learn.TransformedSeries(
      [mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
      mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
  return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ryanpdwyer/teensyio | teensyio/__init__.py | 1 | 7217 | # -*- coding: utf-8 -*-
"""
============================
teensyio
============================
"""
import matplotlib as mpl
mpl.use('Qt4Agg')
import socket
import serial
import io
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
from serial.tools.list_ports import comports
import time
import pandas as pd
from time import sleep
# import bunch
def ls_ports():
    """Return all serial ports reported by pyserial's ``comports()``.

    ``comports()`` already yields the port entries, so materialize it with
    list() instead of a pass-through comprehension.
    """
    return list(comports())
def _m():
    """Toy echo server: accept one client on 127.0.0.1:12346 and send every
    received message back reversed, until interrupted with Ctrl-C.

    See http://stackoverflow.com/a/22679451/2823213
    """
    sock = socket.socket()
    sock.bind(("127.0.0.1", 12346))
    sock.listen(3)
    # print() calls are valid in both Python 2 and 3; the original used
    # Python-2-only print statements, which are syntax errors under Python 3.
    print("Waiting on connection")
    conn = sock.accept()
    print("Client connected")
    while True:
        try:
            m = conn[0].recv(4096)
            conn[0].send(m[::-1])
        except KeyboardInterrupt:
            break
    # Close the per-client connection as well as the listening socket
    # (the client socket was previously leaked).
    conn[0].close()
    sock.close()
def find_teensy():
    """Return the device path of the single attached Teensy serial port.

    Returns
    -------
    str
        The ``cu.usb*`` device path of the matching port.

    Raises
    ------
    ValueError
        If no matching port is present, or if more than one is.
    """
    candidates = [port for port in comports()
                  if 'cu.usb' in port[0] and 'USB Serial' in port[1]]
    if len(candidates) == 1:
        return candidates[0][0]
    elif not candidates:
        # Bug fix: the old code raised "Multiple ports found: []" when no
        # port matched at all, which was misleading.
        raise ValueError("No Teensy serial port found.")
    else:
        raise ValueError("Multiple ports found: {}".format(candidates))
def parse_int(line):
    """Parse a whitespace-separated line of integers into a list.

    Generalized from the original single-space split: any run of
    whitespace (including the trailing newline) is accepted, so lines
    with repeated spaces no longer raise ``ValueError`` on ``int('')``.
    """
    return [int(tok) for tok in line.split()]
def log_message(message):
    """Pair *message* with the current Unix timestamp for the session log."""
    timestamp = time.time()
    return [timestamp, message]
def parse_line_generic(line, x, y, log, strip='\r\n'):
    """Parse one serial line, routing data pairs to x/y and text to the log.

    A line of exactly two integers is appended to *x* and *y*; any other
    integer line is logged; non-numeric lines are logged as text.
    Returns the parsed integer list for numeric lines, otherwise None.
    """
    stripped = line.strip(strip)
    if stripped == '':
        return None
    try:
        values = [int(tok) for tok in stripped.split(' ')]
    except ValueError:
        # Non-numeric content: treat it as a status/log message.
        log.append(log_message(stripped))
        return None
    if len(values) == 2:
        x.append(values[0])
        y.append(values[1])
    else:
        log.append(log_message(values))
    return values
# def parse_line(line, data, log, stop_message):
def arbitrary_line_split(buf, newline='\n', return_if_empty=False):
    """Generator yielding lists of complete lines drained from serial port *buf*.

    Bytes are read with ``buf.inWaiting()``/``buf.read()``; a partial
    trailing line is carried over in ``left_over`` and prepended to the
    next read.  If *return_if_empty* is true, the generator terminates
    once the port is drained and no partial line remains.
    """
    left_over = ""
    while True:
        bytesToRead = buf.inWaiting()
        lines = buf.read(bytesToRead).split(newline)
        # Prepend the fragment carried over from the previous read.
        lines[0] = left_over + lines[0]
        # The last piece may be an incomplete line; hold it for next time.
        left_over = lines.pop()
        if return_if_empty:
            if (len(lines) == 0) and (left_over == ''):
                return
        yield lines
# Need a class to represent the circuit state. Current ranges, voltage ranges, etc.
# cs = bunch.Bunch(
# dac_bit=12,
# adc_bits=12,
# self.v_max = 3.3,
# self.v_min = -3.3,
# )
class TeensyIO(object):
    """Serial front end for a Teensy running the cyclic-voltammetry firmware.

    NOTE(review): contains Python 2 constructs (``print i``, str writes to
    the serial port); the class is hardware-bound and left unmodified.
    """

    # Status strings the firmware may emit (informational only here).
    messages = ("Teensy reset", "Start acquisition", "Continuing", "Stopped")
    stop_message = "Done"
    # Human-readable current-range name -> firmware code (microamps).
    current_ranges = {'3uA': 3, '30uA':30, '300uA':300, '3mA':3000}

    def __init__(self, port, timeout=1, x_name='x', y_name='y',
                 x_scale=1, y_scale=1, **kwargs):
        # Extra **kwargs (e.g. baudrate) are forwarded to serial.Serial.
        self.s = serial.Serial(port, timeout=timeout, **kwargs)
        self.x_name = x_name
        self.y_name = y_name
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.log = list()

    def __repr__(self):
        return "TeensyIO(port={s.port}, timeout={s.timeout})".format(s=self.s)

    def send_cmd(self, cmd, printing=True):
        """Write *cmd* to the Teensy and return (optionally print) the reply."""
        self.s.write(cmd)
        lines = self.s.readlines()
        if printing:
            print("".join(lines))
        return lines

    def setup_cyclic_voltamettry(self, V_start=0, V_max=4095,
                                 V_min=0, V_end=0, N_cycles=1,
                                 current_range='3mA', printing=True):
        """Configure the firmware's cyclic-voltammetry sweep parameters.

        Raises KeyError if *current_range* is not one of:
            3uA, 30uA, 300uA, 3mA
        (the value is looked up in ``self.current_ranges``).
        """
        cr = self.current_ranges[current_range]
        text = "cv set {V_start} {V_max} {V_min} {V_end} {N_cycles} {cr} ".format(
            V_start=V_start, V_max=V_max, V_min=V_min, V_end=V_end,
            N_cycles=N_cycles, cr=cr)
        self.s.write(text)
        self.s.write('print quit ')
        for line in self.s.readlines():
            self.log.append(line)
            if printing:
                print(line.strip('\n'))

    # def setup_cyclic_voltamettry(self, V_start=0, V_max=3.3, V_min=-3.3, V_end=0,
    #                              N_cycles=1, printing=True):

    def run(self, timeout=1):
        """Run data acquisition for a certain length of time."""
        start_time = time.time()
        self.s.write('cv run ')
        self.x = []
        self.y = []
        while time.time() < (start_time + timeout):
            line = self.s.readline().strip('\r\n')
            parse_line_generic(line, self.x, self.y, self.log)
        # Tell the firmware to stop, then drain any remaining output.
        self.s.write('q')
        self.s.write('quit ')
        for line in self.s.readlines():
            parse_line_generic(line, self.x, self.y, self.log)

    def run_and_plot(self, xlim=(0, 4095), ylim=(0, 4095), frames=None):
        """Acquire data while live-plotting it with a matplotlib animation."""
        fig = plt.figure()
        ax = plt.axes(xlim=xlim, ylim=ylim)
        line, = ax.plot([], [])
        self.x = []
        self.y = []
        self.s.write('cv run ')
        gen = arbitrary_line_split(self.s)

        def init():
            # Blank frame for FuncAnimation's blitting.
            line.set_data([], [], )
            return line,

        def animate(i):
            try:
                serial_lines = gen.next()
                for line_ in serial_lines:
                    parse_line_generic(line_, self.x, self.y, self.log)
                # I could check for "stop message if necessary."
                print i  # NOTE: Python 2 print statement
            except KeyboardInterrupt:
                # Stop the firmware and keep whatever data we have.
                self.s.write('q')
                line.set_data(self.x, self.y)
                raise
            line.set_data(self.x, self.y)
            return line,

        anim = animation.FuncAnimation(fig, animate, frames=frames, repeat=False,
                                       init_func=init, interval=50,
                                       blit=True)
        plt.show()
        # After the plot window closes, stop acquisition and drain the port.
        self.s.write('q ')
        self.s.write('quit ')
        lines = self.s.readlines()
        for line in lines:
            parse_line_generic(line, self.x, self.y, self.log)
        self._make_df()

    def _make_df(self):
        # Build a scaled DataFrame from the raw x/y samples.
        self.df = pd.DataFrame()
        self.df[self.x_name] = self.x
        self.df[self.x_name] *= self.x_scale
        self.df[self.y_name] = self.y
        self.df[self.y_name] *= self.y_scale

    def save(self, filename):
        """Dump raw x, y, and log lists as plain text."""
        with io.open(filename, 'w') as f:
            f.write(u'x:\n{}\n\ny:\n{}\n\nlog:\n{}'.format(self.x, self.y, self.log))

    def save_df(self, filename):
        """Write the scaled DataFrame (built by _make_df) to CSV."""
        self.df.to_csv(filename)
def test_run():
    """Smoke test: acquire for the default timeout and print the results."""
    teensy_port = find_teensy()
    t = TeensyIO(teensy_port)
    t.run()
    print("x size: {}".format(len(t.x)))
    print("y size: {}".format(len(t.y)))
    print("log size: {}".format(len(t.log)))
    # Bug fix: the tails previously used len(t.d), an attribute that is
    # never defined (AttributeError); clamp with the list's own length.
    print("x tail: \n{}".format(t.x[max(-25, -len(t.x)):]))
    print("y tail: \n{}".format(t.y[max(-25, -len(t.y)):]))
    print("log:\n{}".format(t.log))
    t.save('test_run.log.txt')
def test_plot():
    """Smoke test: live-plot an acquisition, then save the data and log."""
    teensy_port = find_teensy()
    t = TeensyIO(teensy_port, timeout=1, baudrate=115200)
    t.run_and_plot(xlim=(-32768, 2**16), ylim=(-32768, 32768))
    # 's' tells the firmware to stop streaming — TODO confirm against firmware.
    t.s.write('s')
    t.save_df('test_plot.csv')
    t.save('test_plot.log.txt')
def test():
    """Default manual test entry point."""
    test_plot()
# Versioneer versioning
# from ._version import get_versions
# __version__ = get_versions()['version']
# del get_versions | mit |
bikong2/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.

    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.

    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.

    Feature values must be (finite) numbers.

    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.

    Read more in the :ref:`User Guide <feature_hashing>`.

    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.

    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.

    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).

    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])

    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """

    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        # Validate eagerly so bad arguments fail at construction time.
        self._validate_params(n_features, input_type)

        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative

    @staticmethod
    def _validate_params(n_features, input_type):
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            raise ValueError("Invalid number of features (%d)." % n_features)

        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)

    def fit(self, X=None, y=None):
        """No-op.

        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.

        Returns
        -------
        self : FeatureHasher
        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self

    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.

        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.

        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.
        """
        raw_X = iter(raw_X)
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            # Each string feature carries an implicit value of 1.
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        n_samples = indptr.shape[0] - 1

        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")

        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices

        if self.non_negative:
            # In-place absolute value on the CSR data buffer.
            np.abs(X.data, X.data)
        return X
| bsd-3-clause |
wattlebird/pystruct | examples/plot_ssvm_objective_curves.py | 5 | 2606 | """
==================================
SSVM Convergence Curves
==================================
Showing the relation between cutting plane and primal objectives,
as well as the different algorithms.
We use exact inference here, so the plots are easier to interpret.
As this is a small toy example, it is hard to generalize
the results indicated in the plot to more realistic settigs.
"""
import numpy as np
import matplotlib.pyplot as plt
from pystruct.models import GridCRF
from pystruct.learners import (NSlackSSVM, OneSlackSSVM, SubgradientSSVM,
FrankWolfeSSVM)
from pystruct.datasets import generate_crosses_explicit
# Toy grid-CRF data with exact (branch-and-bound AD3) inference so that the
# objective curves are easy to interpret.
X, Y = generate_crosses_explicit(n_samples=50, noise=10, size=6, n_crosses=1)
n_labels = len(np.unique(Y))
crf = GridCRF(n_states=n_labels, inference_method=("ad3", {'branch_and_bound': True}))

# One learner per algorithm, all on the same model.
n_slack_svm = NSlackSSVM(crf, check_constraints=False,
                         max_iter=50, batch_size=1, tol=0.001)
one_slack_svm = OneSlackSSVM(crf, check_constraints=False,
                             max_iter=100, tol=0.001, inference_cache=50)
subgradient_svm = SubgradientSSVM(crf, learning_rate=0.001, max_iter=20,
                                  decay_exponent=0, momentum=0)
bcfw_svm = FrankWolfeSSVM(crf, max_iter=50, check_dual_every=4)

# n-slack cutting plane ssvm
n_slack_svm.fit(X, Y)

# 1-slack cutting plane ssvm
one_slack_svm.fit(X, Y)

# online subgradient ssvm
subgradient_svm.fit(X, Y)

# Block coordinate Frank-Wolfe
bcfw_svm.fit(X, Y)

# don't plot objective from cached inference for 1-slack
inference_run = ~np.array(one_slack_svm.cached_constraint_)
time_one = np.array(one_slack_svm.timestamps_[1:])[inference_run]

# Plot each learner's cutting-plane/dual objective against its primal
# objective over wall-clock training time.
plt.plot(n_slack_svm.timestamps_[1:], n_slack_svm.objective_curve_,
         label="n-slack cutting plane")
plt.plot(n_slack_svm.timestamps_[1:], n_slack_svm.primal_objective_curve_,
         label="n-slack primal")
plt.plot(time_one,
         np.array(one_slack_svm.objective_curve_)[inference_run],
         label="one-slack cutting_plane")
plt.plot(time_one,
         np.array(one_slack_svm.primal_objective_curve_)[inference_run],
         label="one-slack primal")
plt.plot(subgradient_svm.timestamps_[1:], subgradient_svm.objective_curve_,
         label="subgradient")
plt.plot(bcfw_svm.timestamps_[1:], bcfw_svm.objective_curve_,
         label="Block-Coordinate Frank-Wolfe Dual")
plt.plot(bcfw_svm.timestamps_[1:], bcfw_svm.primal_objective_curve_,
         label="Block-Coordinate Frank-Wolfe Primal")
plt.legend(loc="best")
plt.yscale('log')
plt.xlabel("training time")
plt.show()
| bsd-2-clause |
AlexanderFabisch/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    """Load an MLComp document-classification dataset via ``load_files``."""
    # A non-None set_ selects the 'train'/'test'/'raw' subfolder.
    path = dataset_path if set_ is None else os.path.join(dataset_path, set_)
    return load_files(path, metadata.get('description'), **kwargs)
# Dispatch table: MLComp dataset "format" metadata string -> loader callable.
LOADERS = {
    'DocumentClassification': _load_document_classification,
    # TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a dataset as downloaded from http://mlcomp.org

    Parameters
    ----------
    name_or_id : the integer id or the string name metadata of the MLComp
        dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
        are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
        environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    TODO: implement zip dataset loading too
    """
    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    # Normalize the root path in one pass.
    mlcomp_root = os.path.normpath(
        os.path.abspath(os.path.expanduser(mlcomp_root)))

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # Dataset lookup: by integer id (folder name) or by metadata name line.
    if isinstance(name_or_id, numbers.Integral):
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # Parse the dataset metadata (simple "key: value" lines).
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    # Renamed from `format` to avoid shadowing the builtin, and fixed the
    # 'unknow' typo so the fallback error message reads correctly.
    dataset_format = metadata.get('format', 'unknown')
    loader = LOADERS.get(dataset_format)
    if loader is None:
        raise ValueError("No loader implemented for format: " + dataset_format)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
matthiasplappert/hmmlearn | hmmlearn/tests/test_base.py | 4 | 7022 | from __future__ import print_function
from unittest import TestCase
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.utils.extmath import logsumexp
from hmmlearn import hmm
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
    """Tests for hmm._BaseHMM via a stub whose emission likelihoods are fixed."""

    def setUp(self):
        # Seeded RNG so the randomized tests are reproducible.
        self.prng = np.random.RandomState(9)

    class StubHMM(hmm._BaseHMM):
        """_BaseHMM subclass that returns a pre-set frame log-likelihood matrix."""

        def _compute_log_likelihood(self, X):
            # Ignore X; the test injects self.framelogprob directly.
            return self.framelogprob

        def _generate_sample_from_state(self):
            pass

        def _init(self):
            pass

    def setup_example_hmm(self):
        # Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
        h = self.StubHMM(2)
        h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
        h.startprob_ = [0.5, 0.5]
        framelogprob = np.log([[0.9, 0.2],
                               [0.9, 0.2],
                               [0.1, 0.8],
                               [0.9, 0.2],
                               [0.9, 0.2]])
        # Add dummy observations to stub.
        h.framelogprob = framelogprob
        return h, framelogprob

    def test_init(self):
        h, framelogprob = self.setup_example_hmm()
        for params in [('transmat_',), ('startprob_', 'transmat_')]:
            # Re-construct with the same parameters passed as kwargs.
            d = dict((x[:-1], getattr(h, x)) for x in params)
            h2 = self.StubHMM(h.n_components, **d)
            self.assertEqual(h.n_components, h2.n_components)
            for p in params:
                assert_array_almost_equal(getattr(h, p), getattr(h2, p))

    def test_set_startprob(self):
        h, framelogprob = self.setup_example_hmm()
        startprob = np.array([0.0, 1.0])
        h.startprob_ = startprob
        assert np.allclose(startprob, h.startprob_)

    def test_set_transmat(self):
        h, framelogprob = self.setup_example_hmm()
        transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
        h.transmat_ = transmat
        assert np.allclose(transmat, h.transmat_)

    def test_do_forward_pass(self):
        h, framelogprob = self.setup_example_hmm()

        logprob, fwdlattice = h._do_forward_pass(framelogprob)

        # Reference values from the Wikipedia worked example.
        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)

        reffwdlattice = np.array([[0.4500, 0.1000],
                                  [0.3105, 0.0410],
                                  [0.0230, 0.0975],
                                  [0.0408, 0.0150],
                                  [0.0298, 0.0046]])
        assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)

    def test_do_backward_pass(self):
        h, framelogprob = self.setup_example_hmm()

        bwdlattice = h._do_backward_pass(framelogprob)

        refbwdlattice = np.array([[0.0661, 0.0455],
                                  [0.0906, 0.1503],
                                  [0.4593, 0.2437],
                                  [0.6900, 0.4100],
                                  [1.0000, 1.0000]])
        assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)

    def test_do_viterbi_pass(self):
        h, framelogprob = self.setup_example_hmm()

        logprob, state_sequence = h._do_viterbi_pass(framelogprob)

        refstate_sequence = [0, 0, 1, 0, 0]
        assert_array_equal(state_sequence, refstate_sequence)

        reflogprob = -4.4590
        self.assertAlmostEqual(logprob, reflogprob, places=4)

    def test_score_samples(self):
        h, framelogprob = self.setup_example_hmm()
        nobs = len(framelogprob)

        logprob, posteriors = h.score_samples([])

        # Posteriors over states must sum to one at every frame.
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))

        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)

        refposteriors = np.array([[0.8673, 0.1327],
                                  [0.8204, 0.1796],
                                  [0.3075, 0.6925],
                                  [0.8204, 0.1796],
                                  [0.8673, 0.1327]])
        assert_array_almost_equal(posteriors, refposteriors, decimal=4)

    def test_hmm_score_samples_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob

        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        logprob, hmmposteriors = h.score_samples([])

        assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))

        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        assert_array_almost_equal(hmmposteriors, gmmposteriors)

    def test_hmm_decode_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob

        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        viterbi_ll, state_sequence = h.decode([])

        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        gmmstate_sequence = gmmposteriors.argmax(axis=1)
        assert_array_equal(state_sequence, gmmstate_sequence)

    def test_base_hmm_attributes(self):
        n_components = 20
        startprob = self.prng.rand(n_components)
        startprob = startprob / startprob.sum()
        transmat = self.prng.rand(n_components, n_components)
        transmat /= np.tile(transmat.sum(axis=1)
                            [:, np.newaxis], (1, n_components))

        h = self.StubHMM(n_components)

        self.assertEqual(h.n_components, n_components)

        # Valid assignments round-trip; invalid shapes/normalizations raise.
        h.startprob_ = startprob
        assert_array_almost_equal(h.startprob_, startprob)
        self.assertRaises(ValueError, setattr, h, 'startprob_',
                          2 * startprob)
        self.assertRaises(ValueError, setattr, h, 'startprob_', [])
        self.assertRaises(ValueError, setattr, h, 'startprob_',
                          np.zeros((n_components - 2, 2)))

        h.transmat_ = transmat
        assert_array_almost_equal(h.transmat_, transmat)
        self.assertRaises(ValueError, setattr, h, 'transmat_',
                          2 * transmat)
        self.assertRaises(ValueError, setattr, h, 'transmat_', [])
        self.assertRaises(ValueError, setattr, h, 'transmat_',
                          np.zeros((n_components - 2, n_components)))
| bsd-3-clause |
laurakurup/census-api | census_api.py | 1 | 8612 | '''
Last Updated June 30, 2015
by Laura Kurup
https://github.com/laurakurup
Description: Create a pandas dataframe and csv file from the U.S. Census
Decennial Census API, which offers access to population data by sex, age,
race, etc. and housing data by occupancy, vacancy status, and tenure.
Documentation: https://github.com/laurakurup/census-api/raw/master/README.md
Please check out README.md for a complete
explanation of the variables and options below
Happy querying!
'''
''' Configure your request '''
# NOTE(review): this script uses Python 2 syntax (print statements, urllib2,
# xrange) and will not run under Python 3 without porting.

# census API key --> request at http://www.census.gov/developers/
census_api_key = 'YOUR-KEY-HERE'

# csv file of the variables you want (relative path from python's working directory)
# Get the template: https://github.com/laurakurup/census-api/raw/master/census_variables_sample.csv
variables_csv = 'census_variables.csv'

# 'state', 'county', 'metro' or 'metro-micro'
location_type = 'state'

# maximum variables per API request
api_variable_limit = 50

# append the year to your column names (True / False)
# e.g. 'housing_renter' becomes 'housing_renter_2010'
add_year = True

# subdirectory for data exports (relative to python's working directory)
# if not using, leave as empty string
subdirectory = 'data/census/'

# add a timestamp to your csv export filename (True / False)
# this keeps me from overwriting my data :)
time_stamp = True
''' Imports '''
import pandas as pd
import urllib2
import datetime
import time
''' Process your variables '''

# read the requested variables in
df = pd.read_csv(variables_csv)

# create a list of dictionaries from the relevant columns
variables_list = df[['year', 'variable', 'column_name']].to_dict(orient='records')

# create a list of the distinct years to query (each year is a separate API call)
years_to_query = [item for item in set([item['year'] for item in variables_list])]

''' Set up the dataframe based on location type '''

# create a dataframe of the location names and FIPS codes
# (FIPS columns are read as strings so leading zeros are preserved)
if location_type == 'metro' or location_type == 'metro-micro':
    fips_codes = 'https://github.com/laurakurup/data/raw/master/us_metro_areas/us_metro_areas_fips_codes.csv'
    df = pd.read_csv(fips_codes, dtype={'state_fips': object, 'place_fips': object})
elif location_type == 'state':
    fips_codes = 'https://raw.githubusercontent.com/laurakurup/data/master/us_states/us_states_fips_codes.csv'
    df = pd.read_csv(fips_codes, dtype={'state_fips': object})
else:
    fips_codes = 'https://github.com/laurakurup/data/raw/master/us_counties/us_counties_fips_codes.csv'
    df = pd.read_csv(fips_codes, dtype={'state_fips': object, 'county_fips': object})

# if 'metro' only, drop the micropolitan areas, leaving the 685 metropolitan areas
if location_type == 'metro':
    df = df[df['metro_micro'] != 'micropolitan']
    df = df.reset_index(drop=True)

''' Optional: Test your query '''
# save some time! uncomment the line below to cut your request to 10 locations
# if everything is running smoothly, rerun it on the full list :)
# df = df.head(10)
''' Get the data '''

# Request one source URL from the Census API and return a list of values.
# NOTE(review): Python 2 syntax (`except X, e`, print statements); the bare
# `except:` also swallows KeyboardInterrupt — confirm that is intended.
def get_census_data(source_url):
    # start timing the request
    start_time = time.time()
    # print the location about to be requested (parsed back out of the URL)
    if location_type == 'state':
        print "requesting data for " + source_url.split('=')[3] + ' . . .'
    else:
        print "requesting data for " + source_url.split('=')[4].split('&')[0] + ' ' + source_url.split('=')[3].split('&')[0] + ' . . .'
    try:
        # read json for the source URL
        response = pd.read_json(source_url)
        # transpose for easy saving
        response = response.transpose()
        # if location type is state, save all but the last value (the state FIPS)
        if location_type == 'state':
            data = [item for item in response[1]][:-1]
        # otherwise, save all but the last two values (the state and place FIPS)
        else:
            data = [item for item in response[1]][:-2]
        # print a success message with the length of time
        print("success! %s seconds" % (round(time.time() - start_time, 5)))
        print '---------------------------------------------------'
    # If the API returns a HTTPError, print the error message and the source URL
    except urllib2.HTTPError, error:
        contents = error.read()
        print 'ERROR MESSAGE FROM API: ' + contents
        print source_url
        print '---------------------------------------------------'
        # save 'error' for the variables requested
        data = ['error'] * len(source_url.split(','))
    # For other errors, print the source URL
    # this will happen if the location does not exist for the year requested
    except:
        print 'ERROR – NO RESPONSE FOR URL:'
        print source_url
        print '---------------------------------------------------'
        # save 'error' for the variables requested
        data = ['error'] * len(source_url.split(','))
    # return a list of the response values
    return data
# to build the source URLs, iterate through each year
for year in years_to_query:
    # create a list of the variables for the year
    new_variables_list = [item for item in variables_list if item['year'] == year]
    # iterate through batch(es) of variables that fit under the API limit
    # NOTE(review): starting xrange at 1 appears to skip new_variables_list[0];
    # probably should start at 0 — confirm before relying on the first variable.
    for i in xrange(1, len(new_variables_list), api_variable_limit):
        print 'Starting request for Census ' + str(year) + ' variables ' + str(i) + ' through ' + str(i+api_variable_limit) + ':'
        print '---------------------------------------------------'
        # create a list of the next batch of variables to request
        list_within_limit = new_variables_list[i:i+api_variable_limit]
        # create a string of variables to build the source URL (comma separated, no spaces)
        variables_str = ''
        for item in list_within_limit:
            variables_str = variables_str + item['variable'] + ','
        # trim the last character (extra comma)
        variables_str = variables_str[:-1]
        # build the source URL based on location type
        if location_type == 'metro' or location_type == 'metro-micro':
            df['source_url'] = 'http://api.census.gov/data/' + str(year) + '/sf1?key=' + census_api_key + '&get=' + variables_str + '&for=place:' + df['place_fips'] + '&in=state:' + df['state_fips']
        elif location_type == 'state':
            df['source_url'] = 'http://api.census.gov/data/' + str(year) + '/sf1?key=' + census_api_key + '&get=' + variables_str + '&for=state:' + df['state_fips']
        else:
            df['source_url'] = 'http://api.census.gov/data/' + str(year) + '/sf1?key=' + census_api_key + '&get=' + variables_str + '&for=county:' + df['county_fips'] + '&in=state:' + df['state_fips']
        # run the get_census_data() function on each source URL, create a new column of results
        df['new_data'] = [get_census_data(item) for item in df['source_url']]
        print 'Request complete for Census ' + str(year) + ' variables ' + str(i) + ' through ' + str(i+api_variable_limit)
        print '---------------------------------------------------'
        # iterate through the list of variables that were requested to create columns
        for item in list_within_limit:
            # create column names for each variable with the year (if add_year == True)
            if add_year == True:
                new_column_name = item['column_name'] + '_' + str(item['year'])
            # create column names for each variable without the year (if add_year == False)
            else:
                new_column_name = item['column_name']
            # create the new column with the data returned from the API
            # (note: the inner comprehension reuses the name `item`)
            n = list_within_limit.index(item)
            df[new_column_name] = [item[n] for item in df['new_data']]
    # print success message when year is complete
# print success message when everything is complete
print 'FINISHED! Request complete for all ' + str(len(variables_list)) + ' variables'
''' Clean up and export to csv'''

# drop the scratch columns that were only needed during the API calls
df = df.drop('source_url', 1)
df = df.drop('new_data', 1)

# construct csv file name with date and time added to file name (if time_stamp == True)
# or, without date and time (if time_stamp == False)
if time_stamp == True:
    file_name = subdirectory + 'census-data-by-' + location_type + '-' + datetime.datetime.strftime(datetime.datetime.now(), '%Y.%m.%d-%I.%M%p') + '.csv'
else:
    file_name = subdirectory + 'census-data-by' + location_type + '.csv'

# save data to csv
df.to_csv(file_name, index=False)
| mit |
jordan-melendez/buqeyemodel | gsum/tests/test.py | 1 | 6151 | import numpy as np
import scipy as sp
from scipy.stats import multivariate_normal
# import unittest
from gsum import ConjugateGaussianProcess
from gsum.helpers import *
from gsum.helpers import pivoted_cholesky
from gsum.cutils import pivoted_cholesky as pivoted_cholesky_cython
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct
from sklearn.utils.testing \
import (assert_greater, assert_array_less,
assert_almost_equal, assert_equal, assert_raise_message,
assert_array_almost_equal, assert_array_equal)
import pytest
def f(x):
    """Target function for the GP regression fixtures: ``x * sin(x)``."""
    return np.sin(x) * x
# Training inputs (as column vectors) and targets for the interpolation tests.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()

# Kernel whose hyperparameters the optimizer must leave untouched.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernel_ill_conditioned = RBF(length_scale=15.0, length_scale_bounds="fixed")
kernels = [
    RBF(length_scale=1.0),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) *
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) *
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
    C(1e-5, (1e-5, 1e2)),
    # C(0.1, (1e-2, 1e2)) *
    # RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
    # WhiteKernel(1e-2, (1e-5, 1e2))
]
non_fixed_kernels = [kernel for kernel in kernels
                     if kernel != fixed_kernel]
# kernels_ill_conditioned = [RBF(length_scale=20.0)]
@pytest.mark.parametrize('kernel', kernels)
def test_gpr_interpolation(kernel):
    # Test the interpolating property for different kernels: at the training
    # points a noiseless GP's posterior mean equals the targets and the
    # posterior variance vanishes.
    print(kernel)
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0., decimal=10)
@pytest.mark.parametrize('kernel', kernels)
@pytest.mark.parametrize('decomposition', ['cholesky', 'eig'])
def test_cgp_interpolation(kernel, decomposition):
    # Same interpolation property for ConjugateGaussianProcess, checked
    # under both supported covariance decompositions, with nugget disabled.
    print(kernel)
    gpr = ConjugateGaussianProcess(kernel=kernel, nugget=0, decomposition=decomposition).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0., decimal=10)
# Lower-triangular factors L; the matrices factored below are L * L.T.
Ls = [
    np.array([
        [7., 0, 0, 0, 0, 0],
        [9, 13, 0, 0, 0, 0],
        [4, 10, 6, 0, 0, 0],
        [18, 1, 2, 14, 0, 0],
        [5, 11, 20, 3, 17, 0],
        [19, 12, 16, 15, 8, 21]
    ]),
    np.array([
        [1, 0, 0],
        [2, 3, 0],
        [4, 5, 6.]
    ]),
    np.array([
        [6, 0, 0],
        [3, 2, 0],
        [4, 1, 5.]
    ]),
]

# Expected pivoted-Cholesky factors for each L * L.T above (reference values
# from TensorFlow-Probability / GPyTorch), rows ordered by pivot selection.
pchols = [
    np.array([
        [3.4444, -1.3545, 4.084, 1.7674, -1.1789, 3.7562],
        [8.4685, 1.2821, 3.1179, 12.9197, 0.0000, 0.0000],
        [7.5621, 4.8603, 0.0634, 7.3942, 4.0637, 0.0000],
        [15.435, -4.8864, 16.2137, 0.0000, 0.0000, 0.0000],
        [18.8535, 22.103, 0.0000, 0.0000, 0.0000, 0.0000],
        [38.6135, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
    ]),
    np.array([
        [0.4558, 0.3252, 0.8285],
        [2.6211, 2.4759, 0.0000],
        [8.7750, 0.0000, 0.0000]
    ]),
    np.array([
        [3.7033, 4.7208, 0.0000],
        [2.1602, 2.1183, 1.9612],
        [6.4807, 0.0000, 0.0000]
    ]),
]
@pytest.mark.parametrize('L,pchol', zip(Ls, pchols))
def test_oracle_examples(L, pchol):
    """Inputs taken from Tensorflow-Probability, which was taken from GPyTorch"""
    # Factor the positive-definite matrix L L^T and compare to the oracle.
    mat = np.matmul(L, np.transpose(L))
    # np.testing.assert_allclose(pchol, pivoted_cholesky_cython(mat), atol=1e-4)
    np.testing.assert_allclose(pchol, pivoted_cholesky(mat), atol=1e-4)
# Dense 7x7 covariance fixture used to compare the two pivoted-Cholesky
# implementations (Cython vs. LAPACK-backed).
@pytest.mark.parametrize(
    'cov',
    [np.array([
        [1.1072733475231495 , 1.08739145629774 , 1.029862219545639 ,
         0.8286134266251773 , 0.7039334391266358 , 0.5767310930265864 ,
         0.34725085649025655],
        [1.08739145629774 , 1.1072733475231495 , 1.08739145629774 ,
         0.940663897699325 , 0.8286134266251773 , 0.7039334391266358 ,
         0.4556981608148422 ],
        [1.029862219545639 , 1.08739145629774 , 1.1072733475231495 ,
         1.029862219545639 , 0.9406638976993251 , 0.8286134266251776 ,
         0.5767310930265866 ],
        [0.8286134266251773 , 0.940663897699325 , 1.029862219545639 ,
         1.1072733475231495 , 1.08739145629774 , 1.029862219545639 ,
         0.8286134266251776 ],
        [0.7039334391266358 , 0.8286134266251773 , 0.9406638976993251 ,
         1.08739145629774 , 1.1072733475231495 , 1.08739145629774 ,
         0.9406638976993251 ],
        [0.5767310930265864 , 0.7039334391266358 , 0.8286134266251776 ,
         1.029862219545639 , 1.08739145629774 , 1.1072733475231495 ,
         1.029862219545639 ],
        [0.34725085649025655, 0.4556981608148422 , 0.5767310930265866 ,
         0.8286134266251776 , 0.9406638976993251 , 1.029862219545639 ,
         1.1072733475231495 ]])
     ]
)
def test_old_vs_pchol(cov):
    # This fails because the implementations are not exactly the same:
    # the two routines are expected to differ below atol=1e-15, so the
    # assertion error is the desired outcome.
    pchol_cython = pivoted_cholesky_cython(cov)
    pchol_lapack = pivoted_cholesky(cov)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pchol_cython, pchol_lapack, atol=1e-15)
# class TestGaussianKernel(unittest.TestCase):
# """Test the Gaussian kernel.
# """
#
#
# def test_gaussian_kernel(self):
# ls = 2.5
#
# X = np.asarray([
# [0, 0],
# [0.5, 0],
# [1, 2],
# [1, 2.7],
# [3, 3],
# [3.001, 3.001]
# ])
#
# pm_cov = ExpQuad(2, ls)
# cov = gaussian(X, ls=ls)
#
# np.testing.assert_allclose(pm_cov(X).eval(), cov)
#
# def test_gaussian_kernel2(self):
# ls = 2.5
#
# X = np.linspace(0, 1, 100)[:, None]
#
# pm_cov = ExpQuad(1, ls)
# cov = gaussian(X, ls=ls)
#
# np.testing.assert_allclose(pm_cov(X).eval(), cov)
#
# if __name__ == '__main__':
# unittest.main()
| mit |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/sparse/panel.py | 1 | 18704 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import warnings
from pandas.compat import lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.lib as lib
class SparsePanelAxis(object):
    """Descriptor managing one axis (index or columns) of a SparsePanel.

    Reads come from a cached attribute on the owner instance; writes
    validate the new labels and push them down onto every contained
    sparse frame before updating the cache.
    """

    def __init__(self, cache_field, frame_attr):
        # Attribute name on the owner instance that caches the axis labels.
        self.cache_field = cache_field
        # Attribute name ('index' or 'columns') to set on each sub-frame.
        self.frame_attr = frame_attr

    def __get__(self, obj, type=None):
        return getattr(obj, self.cache_field, None)

    def __set__(self, obj, value):
        labels = _ensure_index(value)

        if isinstance(labels, MultiIndex):
            raise NotImplementedError("value cannot be a MultiIndex")

        # Propagate the new axis labels to every underlying frame.
        for frame in compat.itervalues(obj._frames):
            setattr(frame, self.frame_attr, labels)

        setattr(obj, self.cache_field, labels)
class SparsePanel(Panel):
    """
    Sparse version of Panel

    Parameters
    ----------
    frames : dict of DataFrame objects
    items : array-like
    major_axis : array-like
    minor_axis : array-like
    default_kind : {'block', 'integer'}, default 'block'
        Default sparse kind for converting Series to SparseSeries. Will not
        override SparseSeries passed into constructor
    default_fill_value : float
        Default fill_value for converting Series to SparseSeries. Will not
        override SparseSeries passed in

    Notes
    -----
    Deprecated (GH #11157); kept for backwards compatibility.  Internally
    the panel is stored as a dict of SparseDataFrame objects keyed by item,
    all conformed to a shared major_axis (index) and minor_axis (columns).
    """
    ndim = 3
    _typ = 'panel'
    _subtyp = 'sparse_panel'

    def __init__(self, frames=None, items=None, major_axis=None,
                 minor_axis=None, default_fill_value=np.nan,
                 default_kind='block', copy=False):

        # deprecation #11157
        warnings.warn("SparsePanel is deprecated and will be removed in a "
                      "future version", FutureWarning, stacklevel=2)

        if frames is None:
            frames = {}

        # A 3-D ndarray is split into one SparseDataFrame per item.
        if isinstance(frames, np.ndarray):
            new_frames = {}
            for item, vals in zip(items, frames):
                new_frames[item] = SparseDataFrame(
                    vals, index=major_axis, columns=minor_axis,
                    default_fill_value=default_fill_value,
                    default_kind=default_kind)
            frames = new_frames

        if not isinstance(frames, dict):
            raise TypeError('input must be a dict, a %r was passed' %
                            type(frames).__name__)

        self.default_fill_value = fill_value = default_fill_value
        self.default_kind = kind = default_kind

        # pre-filter, if necessary
        if items is None:
            items = Index(sorted(frames.keys()))
        items = _ensure_index(items)

        # Conform every frame to a single shared index / columns pair.
        (clean_frames, major_axis,
         minor_axis) = _convert_frames(frames, major_axis, minor_axis,
                                       kind=kind, fill_value=fill_value)

        self._frames = clean_frames

        # do we want to fill missing ones?
        for item in items:
            if item not in clean_frames:
                raise ValueError('column %r not found in data' % item)

        self._items = items
        self.major_axis = major_axis
        self.minor_axis = minor_axis

    def _consolidate_inplace(self):  # pragma: no cover
        # do nothing when DataFrame calls this method
        pass

    def __array_wrap__(self, result):
        # Re-wrap the result of a ufunc as a SparsePanel with our axes.
        return SparsePanel(result, items=self.items,
                           major_axis=self.major_axis,
                           minor_axis=self.minor_axis,
                           default_kind=self.default_kind,
                           default_fill_value=self.default_fill_value)

    @classmethod
    def from_dict(cls, data):
        """
        Analogous to Panel.from_dict
        """
        return SparsePanel(data)

    def to_dense(self):
        """
        Convert SparsePanel to (dense) Panel

        Returns
        -------
        dense : Panel
        """
        return Panel(self.values, self.items, self.major_axis, self.minor_axis)

    def as_matrix(self):
        # Alias for the dense 3-D ``values`` ndarray.
        return self.values

    @property
    def values(self):
        # return dense values (items x major x minor ndarray)
        return np.array([self._frames[item].values for item in self.items])

    # need a special property for items to make the field assignable
    _items = None

    def _get_items(self):
        return self._items

    def _set_items(self, new_items):
        new_items = _ensure_index(new_items)
        if isinstance(new_items, MultiIndex):
            raise NotImplementedError("itemps cannot be a MultiIndex")

        # need to create new frames dict: re-key the existing frames by
        # positional correspondence between old and new item labels
        old_frame_dict = self._frames
        old_items = self._items
        self._frames = dict((new_k, old_frame_dict[old_k])
                            for new_k, old_k in zip(new_items, old_items))
        self._items = new_items

    items = property(fget=_get_items, fset=_set_items)

    # DataFrame's index
    major_axis = SparsePanelAxis('_major_axis', 'index')

    # DataFrame's columns / "items"
    minor_axis = SparsePanelAxis('_minor_axis', 'columns')

    def _ixs(self, i, axis=0):
        """
        Positional indexing along an axis.

        for compat as we don't support Block Manager here
        i : int, slice, or sequence of integers
        axis : int
        """
        key = self._get_axis(axis)[i]

        # xs cannot handle a non-scalar key, so just reindex here
        if com.is_list_like(key):
            return self.reindex(**{self._get_axis_name(axis): key})

        return self.xs(key, axis=axis)

    def _slice(self, slobj, axis=0, kind=None):
        """
        Slice along one axis by reindexing on the sliced labels.

        for compat as we don't support Block Manager here
        """
        axis = self._get_axis_name(axis)
        index = self._get_axis(axis)

        return self.reindex(**{axis: index[slobj]})

    def _get_item_cache(self, key):
        # Item lookup resolves directly to the stored SparseDataFrame.
        return self._frames[key]

    def __setitem__(self, key, value):
        if isinstance(value, DataFrame):
            # Conform to our axes, then sparsify dense frames.
            value = value.reindex(index=self.major_axis,
                                  columns=self.minor_axis)
            if not isinstance(value, SparseDataFrame):
                value = value.to_sparse(fill_value=self.default_fill_value,
                                        kind=self.default_kind)
        else:
            raise ValueError('only DataFrame objects can be set currently')

        self._frames[key] = value

        if key not in self.items:
            self._items = Index(list(self.items) + [key])

    def set_value(self, item, major, minor, value):
        """
        Quickly set single value at (item, major, minor) location

        Parameters
        ----------
        item : item label (panel item)
        major : major axis label (panel item row)
        minor : minor axis label (panel item column)
        value : scalar

        Notes
        -----
        This method *always* returns a new object. It is not particularly
        efficient but is provided for API compatibility with Panel

        Returns
        -------
        panel : SparsePanel
        """
        # Round-trip through a dense Panel to perform the assignment.
        dense = self.to_dense().set_value(item, major, minor, value)
        return dense.to_sparse(kind=self.default_kind,
                               fill_value=self.default_fill_value)

    def __delitem__(self, key):
        # Drop the frame and remove the label from the items index.
        loc = self.items.get_loc(key)
        indices = lrange(loc) + lrange(loc + 1, len(self.items))
        del self._frames[key]
        self._items = self._items.take(indices)

    def __getstate__(self):
        # pickling: axes are serialized via the legacy pickle helpers
        from pandas.io.pickle import _pickle_array
        return (self._frames, _pickle_array(self.items),
                _pickle_array(self.major_axis),
                _pickle_array(self.minor_axis), self.default_fill_value,
                self.default_kind)

    def __setstate__(self, state):
        frames, items, major, minor, fv, kind = state

        from pandas.io.pickle import _unpickle_array

        self.default_fill_value = fv
        self.default_kind = kind

        # Restore the cached axis attributes directly (bypassing the
        # SparsePanelAxis descriptors, since frames are already conformed).
        self._items = _ensure_index(_unpickle_array(items))
        self._major_axis = _ensure_index(_unpickle_array(major))
        self._minor_axis = _ensure_index(_unpickle_array(minor))
        self._frames = frames

    def copy(self, deep=True):
        """
        Make a copy of the sparse panel

        Returns
        -------
        copy : SparsePanel
        """

        d = self._construct_axes_dict()
        if deep:
            new_data = dict((k, v.copy(deep=True))
                            for k, v in compat.iteritems(self._frames))
            d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
        else:
            new_data = self._frames.copy()
        d['default_fill_value'] = self.default_fill_value
        d['default_kind'] = self.default_kind
        return SparsePanel(new_data, **d)

    def to_frame(self, filter_observations=True):
        """
        Convert SparsePanel to (dense) DataFrame

        Returns
        -------
        frame : DataFrame
        """
        if not filter_observations:
            raise TypeError('filter_observations=False not supported for '
                            'SparsePanel.to_long')

        I, N, K = self.shape
        counts = np.zeros(N * K, dtype=int)

        d_values = {}
        d_indexer = {}

        for item in self.items:
            frame = self[item]

            values, major, minor = _stack_sparse_info(frame)

            # values are stacked column-major
            indexer = minor * N + major
            counts.put(indexer, counts.take(indexer) + 1)  # cuteness

            d_values[item] = values
            d_indexer[item] = indexer

        # have full set of observations for each item
        mask = counts == I

        # for each item, take mask values at index locations for those sparse
        # values, and use that to select values
        values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
                                  for item in self.items])

        inds, = mask.nonzero()

        # still column major
        major_labels = inds % N
        minor_labels = inds // N

        index = MultiIndex(levels=[self.major_axis, self.minor_axis],
                           labels=[major_labels, minor_labels],
                           verify_integrity=False)

        df = DataFrame(values, index=index, columns=self.items)
        return df.sortlevel(level=0)

    # Deprecated aliases kept for API compatibility.
    to_long = deprecate('to_long', to_frame)
    toLong = deprecate('toLong', to_frame)

    def reindex(self, major=None, items=None, minor=None, major_axis=None,
                minor_axis=None, copy=False):
        """
        Conform / reshape panel axis labels to new input labels

        Parameters
        ----------
        major : array-like, default None
        items : array-like, default None
        minor : array-like, default None
        copy : boolean, default False
            Copy underlying SparseDataFrame objects

        Returns
        -------
        reindexed : SparsePanel
        """
        # major/major_axis (and minor/minor_axis) are mutually exclusive
        major = com._mut_exclusive(major=major, major_axis=major_axis)
        minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)

        if com._all_none(items, major, minor):
            raise ValueError('Must specify at least one axis')

        major = self.major_axis if major is None else major
        minor = self.minor_axis if minor is None else minor

        if items is not None:
            new_frames = {}
            for item in items:
                if item in self._frames:
                    new_frames[item] = self._frames[item]
                else:
                    raise NotImplementedError('Reindexing with new items not '
                                              'yet supported')
        else:
            new_frames = self._frames

        if copy:
            new_frames = dict((k, v.copy())
                              for k, v in compat.iteritems(new_frames))

        return SparsePanel(new_frames, items=items, major_axis=major,
                           minor_axis=minor,
                           default_fill_value=self.default_fill_value,
                           default_kind=self.default_kind)

    def _combine(self, other, func, axis=0):
        # Dispatch on the type of `other`; NB: falls through (returns None)
        # for unsupported operand types.
        if isinstance(other, DataFrame):
            return self._combineFrame(other, func, axis=axis)
        elif isinstance(other, Panel):
            return self._combinePanel(other, func)
        elif lib.isscalar(other):
            new_frames = dict((k, func(v, other))
                              for k, v in self.iteritems())
            return self._new_like(new_frames)

    def _combineFrame(self, other, func, axis=0):
        index, columns = self._get_plane_axes(axis)
        axis = self._get_axis_number(axis)

        other = other.reindex(index=index, columns=columns)

        # Align the broadcast axis by swapping it to the front, apply the
        # op against the dense values, then swap back.
        if axis == 0:
            new_values = func(self.values, other.values)
        elif axis == 1:
            new_values = func(self.values.swapaxes(0, 1), other.values.T)
            new_values = new_values.swapaxes(0, 1)
        elif axis == 2:
            new_values = func(self.values.swapaxes(0, 2), other.values)
            new_values = new_values.swapaxes(0, 2)

        # TODO: make faster!
        new_frames = {}
        for item, item_slice in zip(self.items, new_values):
            old_frame = self[item]
            ofv = old_frame.default_fill_value
            ok = old_frame.default_kind
            new_frames[item] = SparseDataFrame(item_slice,
                                               index=self.major_axis,
                                               columns=self.minor_axis,
                                               default_fill_value=ofv,
                                               default_kind=ok)

        return self._new_like(new_frames)

    def _new_like(self, new_frames):
        # Build a SparsePanel sharing this panel's axes and defaults.
        return SparsePanel(new_frames, self.items, self.major_axis,
                           self.minor_axis,
                           default_fill_value=self.default_fill_value,
                           default_kind=self.default_kind)

    def _combinePanel(self, other, func):
        # Operate over the union of both panels' axes.
        items = self.items.union(other.items)
        major = self.major_axis.union(other.major_axis)
        minor = self.minor_axis.union(other.minor_axis)

        # could check that everything's the same size, but forget it
        this = self.reindex(items=items, major=major, minor=minor)
        other = other.reindex(items=items, major=major, minor=minor)

        new_frames = {}
        for item in items:
            new_frames[item] = func(this[item], other[item])

        if not isinstance(other, SparsePanel):
            new_default_fill = self.default_fill_value
        else:
            # maybe unnecessary
            new_default_fill = func(self.default_fill_value,
                                    other.default_fill_value)

        return SparsePanel(new_frames, items, major, minor,
                           default_fill_value=new_default_fill,
                           default_kind=self.default_kind)

    def major_xs(self, key):
        """
        Return slice of panel along major axis

        Parameters
        ----------
        key : object
            Major axis label

        Returns
        -------
        y : DataFrame
            index -> minor axis, columns -> items
        """
        slices = dict((k, v.xs(key)) for k, v in self.iteritems())
        return DataFrame(slices, index=self.minor_axis, columns=self.items)

    def minor_xs(self, key):
        """
        Return slice of panel along minor axis

        Parameters
        ----------
        key : object
            Minor axis label

        Returns
        -------
        y : SparseDataFrame
            index -> major axis, columns -> items
        """
        slices = dict((k, v[key]) for k, v in self.iteritems())
        return SparseDataFrame(slices, index=self.major_axis,
                               columns=self.items,
                               default_fill_value=self.default_fill_value,
                               default_kind=self.default_kind)

    # TODO: allow SparsePanel to work with flex arithmetic.
    # pow and mod only work for scalars for now
    def pow(self, val, *args, **kwargs):
        """wrapper around `__pow__` (only works for scalar values)"""
        return self.__pow__(val)

    def mod(self, val, *args, **kwargs):
        """wrapper around `__mod__` (only works for scalar values)"""
        return self.__mod__(val)
# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **
                                   ops.panel_special_funcs)

# Backwards-compatible alias for the pre-rename class name.
SparseWidePanel = SparsePanel
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
    """Coerce every frame to a SparseDataFrame on shared axes.

    Returns the dict of conformed sparse frames along with the common
    index and columns (computed from the combined axes of the inputs
    when not supplied).
    """
    from pandas.core.panel import _get_combined_index

    sparse_frames = {}
    for item, df in compat.iteritems(frames):
        if not isinstance(df, SparseDataFrame):
            df = SparseDataFrame(df, default_kind=kind,
                                 default_fill_value=fill_value)
        sparse_frames[item] = df

    # Derive any missing axis from the combination across all frames.
    if index is None:
        index = _get_combined_index(
            [x.index for x in sparse_frames.values()])
    if columns is None:
        columns = _get_combined_index(
            [x.columns for x in sparse_frames.values()])

    index = _ensure_index(index)
    columns = _ensure_index(columns)

    # Conform each frame to the shared axes where necessary.
    for item, df in compat.iteritems(sparse_frames):
        if not (df.index.equals(index) and df.columns.equals(columns)):
            sparse_frames[item] = df.reindex(index=index, columns=columns)

    return sparse_frames, index, columns
def _stack_sparse_info(frame):
    # Stack a SparseDataFrame's stored (non-fill) values column-major and
    # return (sparse_values, major_labels, minor_labels) suitable for
    # MultiIndex construction.  Requires NaN fill values throughout.
    lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]

    # this is pretty fast
    minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)

    inds_to_concat = []
    vals_to_concat = []
    for col in frame.columns:
        series = frame[col]

        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')

        # Row positions of the stored values within this column.
        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)

    major_labels = np.concatenate(inds_to_concat)
    sparse_values = np.concatenate(vals_to_concat)

    return sparse_values, major_labels, minor_labels
| mit |
harish2rb/pyGeoNet | pygeonet_v1.0/pygeonet_processing.py | 1 | 49204 | #! /usr/bin/env python
# pygeonet_processing.py
# Run this file after setting up folder structure
# in pygeonet_parameters.py
import sys
import os
from osgeo import gdal,osr,ogr
import statsmodels.api as sm
import numpy as np
from time import clock
import pygeonet_prepare as Parameters
import pygeonet_defaults as defaults
from math import modf, floor
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.stats.mstats import mquantiles
import scipy.signal as conv2
from scipy import stats
import skfmm
from scipy import ndimage
import numpy.ma as npma
import shutil
# ----------- GRASS GIS SETUP ------------------
#setting up the environment for grass gis access
"""
# The below grass script will work assuming you have installed
# Grass GIS 7 on your machine and that the required environment
# variables are set on windows as required.
This has not been tested on linux machines yet.
"""
sys.path.append(os.path.join(os.environ['GISBASE'], "etc", "python"))
import grass.script as g
import grass.script.setup as gsetup
# -------------------------- FUNCTIONS ----------------------------
def read_dem_from_geotiff(demFileName,demFilePath):
    # Open the GeoTIFF format DEM and return band 1 as a 2-D array.
    # Side effects: caches the GDAL driver, geotransform, pixel scale,
    # corner coordinates and projection WKT on the shared Parameters
    # module for use by the writer routines below.
    fullFilePath = demFilePath + demFileName
    #print fullFilePath
    ary = []
    gdal.UseExceptions()
    ds = gdal.Open(fullFilePath, gdal.GA_ReadOnly)
    Parameters.driver = ds.GetDriver()
    geotransform = ds.GetGeoTransform()
    Parameters.geotransform = geotransform
    ary = ds.GetRasterBand(1).ReadAsArray()
    # geotransform[1] is the pixel width (x resolution).
    Parameters.demPixelScale = float(geotransform[1])
    Parameters.xLowerLeftCoord = float(geotransform[0])
    # NOTE(review): in GDAL convention geotransform[3] is the *upper*-left
    # y coordinate; the name yLowerLeftCoord may be misleading -- confirm.
    Parameters.yLowerLeftCoord = float(geotransform[3])
    Parameters.inputwktInfo = ds.GetProjection()
    return ary
def anisodiff(img, niter, kappa, gamma, step=(1., 1.), option=2):
    """Perona-Malik anisotropic diffusion smoothing.

    Parameters
    ----------
    img : 2-D array
        Input image/DEM; converted to float32 internally (input untouched).
    niter : int
        Number of diffusion iterations.
    kappa : float
        Conduction (edge-stopping) threshold; gradients much larger than
        kappa are treated as edges and diffused less.
    gamma : float
        Time-step weight applied to each update.
    step : tuple of float, optional
        Grid spacing in the (row, column) directions.
    option : int, optional
        2 -> rational conduction g = 1/(1 + (dI/kappa)^2)  (default)
        1 -> exponential conduction g = exp(-(dI/kappa)^2)
        Any other value leaves the conduction coefficients at 1,
        i.e. plain linear diffusion.

    Returns
    -------
    float32 array of the same shape as ``img``.
    """
    # work on a float copy so the caller's array is not modified
    img = img.astype('float32')
    imgout = img.copy()

    # scratch arrays reused across iterations
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()

    # BUGFIX: `xrange` is Python-2 only and raises NameError under
    # Python 3; `range` iterates identically in both versions.
    for _ in range(niter):
        # do a simple gaussian smoothing
        #imgout = simple_gaussian_smoothing(imgout,5,\
        #                       defaults.diffusionSigmaSquared)
        # forward differences to the south and east neighbors
        deltaS[:-1, :] = np.diff(imgout, axis=0)
        deltaE[:, :-1] = np.diff(imgout, axis=1)

        # conduction coefficients (edge-stopping functions)
        if option == 2:
            gS = 1. / (1. + (deltaS / kappa) ** 2.) / step[0]
            gE = 1. / (1. + (deltaE / kappa) ** 2.) / step[1]
        elif option == 1:
            gS = np.exp(-(deltaS / kappa) ** 2.) / step[0]
            gE = np.exp(-(deltaE / kappa) ** 2.) / step[1]

        # weighted flux matrices
        E = gE * deltaE
        S = gS * deltaS

        # subtract a copy that has been shifted 'North/West' by one
        # pixel -- this forms the divergence of the flux field
        NS[:] = S
        EW[:] = E
        NS[1:, :] -= S[:-1, :]
        EW[:, 1:] -= E[:, :-1]

        # explicit Euler update
        imgout += gamma * (NS + EW)

    return imgout
def compute_dem_curvature(demArray, pixelDemScale, curvatureCalcMethod):
    """Compute the curvature of a DEM.

    Parameters
    ----------
    demArray : 2-D array
        Elevation grid.
    pixelDemScale : float
        Pixel size, used as the spacing for np.gradient.
    curvatureCalcMethod : str
        'geometric' normalizes the gradient field by the local slope
        before taking the divergence; 'laplacian' uses the raw gradients.

    Returns
    -------
    2-D array: divergence of the (optionally normalized) gradient field.
    """
    # BUGFIX: Python-2-only `print` statements replaced with the
    # single-argument parenthesized form, valid in both Python 2 and 3.
    print('computing DTM curvature')
    #demArray[demArray<0]=np.nan
    gradXArray, gradYArray = np.gradient(demArray, pixelDemScale)
    slopeArrayT = np.sqrt(gradXArray**2 + gradYArray**2)
    if curvatureCalcMethod == 'geometric':
        # Geometric curvature: divergence of the unit (slope-normalized)
        # gradient field.  NB: divides by zero where the surface is flat.
        print('using geometric curvature')
        gradXArrayT = np.divide(gradXArray, slopeArrayT)
        gradYArrayT = np.divide(gradYArray, slopeArrayT)
        #gradXArrayT[slopeArrayT==0.0]=0.0
        #gradYArrayT[slopeArrayT==0.0]=0.0
    elif curvatureCalcMethod == 'laplacian':
        # Laplacian curvature: divergence of the raw gradient field.
        print('using laplacian curvature')
        gradXArrayT = gradXArray
        gradYArrayT = gradYArray
    gradGradXArray, tmpy = np.gradient(gradXArrayT, pixelDemScale)
    tmpX, gradGradYArray = np.gradient(gradYArrayT, pixelDemScale)
    curvatureDemArray = gradGradXArray + gradGradYArray
    del tmpy, tmpX
    return curvatureDemArray
def compute_quantile_quantile_curve(x):
    # Build a probability (Q-Q) plot of `x` and return the scipy probplot
    # result tuple.  Also prints a statsmodels ProbPlot fit (t distribution)
    # for comparison.  Side effect: increments the module-wide figure
    # counter and opens a new matplotlib figure.
    print 'getting qqplot estimate'
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    res = stats.probplot(x, plot=plt)
    res1 = sm.ProbPlot(x, stats.t, fit=True)
    print res1
    return res
# Write geotif to file on a disk
def write_geotif_generic(inputArray,outfilepath,outfilename):
    # Write a 2-D array to disk as a single-band Float32 GeoTIFF, reusing
    # the driver, geotransform and projection cached on the Parameters
    # module by read_dem_from_geotiff().
    print 'writing geotiff', outfilename
    output_fileName = outfilepath + outfilename
    # Get shape
    ncols = inputArray.shape[0]
    nrows = inputArray.shape[1]
    # create the output image
    driver = Parameters.driver
    #print driver
    # NOTE: GDAL Create takes (xsize, ysize) = (columns, rows), hence the
    # (nrows, ncols) argument order given the shape unpacking above.
    outDs = driver.Create(output_fileName, nrows, ncols, 1, gdal.GDT_Float32)
    if outDs is None:
        print 'Could not create DemName.tif'
        sys.exit(1)
    outBand = outDs.GetRasterBand(1)
    #outData = inputArray
    #outBand.SetNoDataValue(-3.402823e+038)
    # set the reference info
    geotransform = Parameters.geotransform
    cc = (geotransform[0],geotransform[1],geotransform[2],\
         geotransform[3],geotransform[4],geotransform[5])
    outDs.SetGeoTransform(cc)
    outDs.SetProjection(Parameters.inputwktInfo)
    # write the band
    PMarray=np.array(inputArray)
    outBand.WriteArray(PMarray)
    # flush data to disk, set the NoData value and calculate stats
    outBand.FlushCache()
    del PMarray, outDs, outBand, driver
# Write filtered geotiff to disk to be used by GRASS GIS
def write_geotif_filteredDEM(filteredDemArray,filepath,filename):
    # Write the filtered DEM to the fixed GRASS-GIS input path
    # (Parameters.pmGrassGISfileName).  NOTE(review): the filepath and
    # filename arguments are currently ignored.  The projection is
    # round-tripped through its EPSG authority code before writing.
    print 'writing filtered DEM'
    output_fileName = Parameters.pmGrassGISfileName
    # Create gtif
    ncols = filteredDemArray.shape[0]
    nrows = filteredDemArray.shape[1]
    print ncols, nrows
    # create the output image
    driver = gdal.GetDriverByName('GTiff')
    #print driver
    outDs = driver.Create(output_fileName,nrows,ncols,1, gdal.GDT_Float32)
    if outDs is None:
        print 'Could not create tif file'
        sys.exit(1)
    # set the reference info
    geotransform = Parameters.geotransform
    outDs.SetGeoTransform(geotransform)
    outDs.SetProjection(Parameters.inputwktInfo)
    # write the band
    #PMarray=np.array(filteredDemArray)
    outband = outDs.GetRasterBand(1)
    PMarray = np.array(filteredDemArray)
    print type(PMarray)
    outband.WriteArray(PMarray)
    # Re-import the spatial reference via its EPSG code so the output
    # carries a clean, authority-coded projection definition.
    outRasterSRS = osr.SpatialReference(wkt=Parameters.inputwktInfo)
    authoritycode = outRasterSRS.GetAuthorityCode("PROJCS")
    outRasterSRS.ImportFromEPSG(int(authoritycode))
    outDs.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
    # finishing the writing of filtered DEM
    del outDs, outband, driver,outRasterSRS
    #del dataset, outData,outDs,outBand
# Flow accumulation is computed by calling GRASS GIS functions.
def flowaccumulation(filteredDemArray):
    # Drive GRASS GIS (r.watershed, r.mapcalc, r.to.vect, r.stream.basins)
    # to compute flow direction, flow accumulation, outlets and sub-basins
    # from the filtered DEM, exporting each result as a GeoTIFF.
    #
    # Returns a dict with:
    #   'outlets'       : (row, col) arrays of outlet pixels
    #   'fac' / 'fdr'   : flow accumulation / flow direction grids
    #   'outletsxxProj' / 'outletsyyProj' : outlets in map coordinates
    #   'bigbasins'     : sub-basin label grid (transposed)
    ncols = filteredDemArray.shape[0]
    nrows = filteredDemArray.shape[1]
    print ncols,nrows
    gisbase = os.environ['GISBASE']
    gisdbdir = Parameters.gisdbdir
    originalGeotiff = Parameters.demDataFilePath + Parameters.demFileName
    geotiff = Parameters.pmGrassGISfileName
    # Start a GRASS session in the default location, then create a
    # project-specific location/mapset from the DEM's georeferencing.
    print gsetup.init(gisbase, gisdbdir, 'demolocation', 'PERMANENT')
    locationGeonet = 'geonet'
    mapsetGeonet = 'geonetuser'
    print 'Making the geonet location'
    print g.run_command('g.proj', georef=geotiff,\
                        location= locationGeonet)
    location = locationGeonet
    mapset = mapsetGeonet
    print 'Number of Mapsets after making locations'
    print g.read_command('g.mapsets', flags = 'l')
    print 'Setting environ'
    print gsetup.init(gisbase, gisdbdir, locationGeonet, 'PERMANENT')
    print g.gisenv()
    print 'Making mapset now'
    print g.run_command('g.mapset', flags = 'c', mapset = mapsetGeonet,\
                        location = locationGeonet, dbase = gisdbdir)
    #after adding new mapset
    print 'mapsets after making new'
    print g.read_command('g.mapsets', flags = 'l')
    # gsetup initialization
    print gsetup.init(gisbase, gisdbdir, locationGeonet, mapsetGeonet)
    # Read the filtered DEM
    print 'r.in.gdal'
    tmpfile = Parameters.demFileName # this reads something like skunkroi.tif
    geotiffmapraster = tmpfile.split('.')[0]
    print 'geotiffmapraster: ',geotiffmapraster
    print g.run_command('r.in.gdal', input=geotiff, \
                        output=geotiffmapraster,overwrite=True)
    gtf = Parameters.geotransform
    print gtf
    #Flow computation for massive grids (float version)
    print "Calling the r.watershed command from GRASS GIS"
    subbasinThreshold = defaults.thresholdAreaSubBasinIndexing
    # Large rasters use the -m (disk swap) flag, which requires running
    # r.watershed twice (drainage and accumulation outputs separately).
    if Parameters.xDemSize > 4000 or Parameters.yDemSize > 4000:
        print ('using swap memory option')
        print g.run_command('r.watershed',flags ='am',overwrite=True,\
                            elevation=geotiffmapraster, \
                            threshold=subbasinThreshold, \
                            drainage = 'dra1v23')
        print g.run_command('r.watershed',flags ='am',overwrite=True,\
                            elevation=geotiffmapraster, \
                            threshold=subbasinThreshold, \
                            accumulation='acc1v23')
    else :
        print g.run_command('r.watershed',flags ='a',overwrite=True,\
                            elevation=geotiffmapraster, \
                            threshold=subbasinThreshold, \
                            accumulation='acc1v23',\
                            drainage = 'dra1v23')
    # Negative drainage directions mark cells draining out of the region;
    # mark them as outlet points.
    print 'r.maplac'
    print g.run_command('r.mapcalc',overwrite=True,\
                        expression='outletmap = if(dra1v23 >= 0,null(),1)')
    print 'r.to.vector'
    print g.run_command('r.to.vect',overwrite=True,\
                        input = 'outletmap', output = 'outletsmapvec',\
                        type='point')
    print "r.stream.basins"
    print g.run_command('r.stream.basins',overwrite=True,\
                        direction='dra1v23',points='outletsmapvec',\
                        basins = 'outletbains')
    # Number of rasters after computation
    print g.read_command('g.list', _type = 'rast')
    # Save the outputs as TIFs
    outlet_filename = geotiffmapraster + '_outlets.tif'
    print g.run_command('r.out.gdal',overwrite=True,\
                        input='outletmap', type='Float32',\
                        output=Parameters.geonetResultsDir +\
                        outlet_filename,\
                        format='GTiff')
    outputFAC_filename = geotiffmapraster + '_fac.tif'
    print g.run_command('r.out.gdal',overwrite=True,\
                        input='acc1v23', type='Float64',\
                        output=Parameters.geonetResultsDir +\
                        outputFAC_filename,\
                        format='GTiff')
    outputFDR_filename = geotiffmapraster + '_fdr.tif'
    print g.run_command('r.out.gdal',overwrite=True,\
                        input = "dra1v23", type='Float64',\
                        output=Parameters.geonetResultsDir+\
                        outputFDR_filename,\
                        format='GTiff')
    outputBAS_filename = geotiffmapraster + '_basins.tif'
    print g.run_command('r.out.gdal',overwrite=True,\
                        input = "outletbains", type='Int16',\
                        output=Parameters.geonetResultsDir+\
                        outputBAS_filename,\
                        format='GTiff')
    # plot the flow directions
    fdrtif = Parameters.geonetResultsDir+outputFDR_filename
    dsfdr = gdal.Open(fdrtif, gdal.GA_ReadOnly)
    aryfdr = dsfdr.GetRasterBand(1).ReadAsArray()
    nanDemArrayfdr=np.array(aryfdr)
    nanDemArrayfdrT = nanDemArrayfdr.T
    del dsfdr,aryfdr
    outlettif = Parameters.geonetResultsDir+outlet_filename
    dsout = gdal.Open(outlettif, gdal.GA_ReadOnly)
    aryfdrout = dsout.GetRasterBand(1).ReadAsArray()
    nanDemArrayfdrout=np.array(aryfdrout)
    del dsout,aryfdrout
    outletfromtif = np.where(nanDemArrayfdrout==1)
    print 'outletfromtif'
    print outletfromtif
    """
    Output drainage raster map contains drainage direction.
    Provides the "aspect" for each cell measured CCW from East.
    Multiplying positive values by 45 will give the direction
    in degrees that the surface runoff will travel from that cell.
    The value 0 (zero) indicates that the cell is a depression area
    (defined by the depression input map).
    Negative values indicate that surface runoff is leaving the boundaries
    of the current geographic region. The absolute value of these
    negative cells indicates the direction of flow.
    """
    #outlets = np.where((nanDemArrayfdr<0) & (nanDemArrayfdr!=-3.402823e+038))
    outlets = np.where(nanDemArrayfdr<0)
    print "Number of outlets :", str(len(outlets[0]))
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(nanDemArrayfdr)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Flow directions DEM')
    if defaults.doPlot==1:
        plt.show()
    # plot the flow accumulation
    factif = Parameters.geonetResultsDir+outputFAC_filename
    dsfac = gdal.Open(factif, gdal.GA_ReadOnly)
    aryfac = dsfac.GetRasterBand(1).ReadAsArray()
    nanDemArrayfac=np.array(aryfac)
    del dsfac,aryfac
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(nanDemArrayfac,cmap=cm.BrBG)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Flow accumulations DEM')
    if defaults.doPlot==1:
        plt.show()
    # getting the bigbasins from the r.streams.basins modules
    basinstif = Parameters.geonetResultsDir+outputBAS_filename
    dsbasins = gdal.Open(basinstif, gdal.GA_ReadOnly)
    arybasins = dsbasins.GetRasterBand(1).ReadAsArray()
    nanDemArraybasins =np.array(arybasins)
    nanDemArraybasins = nanDemArraybasins.T
    del dsbasins,arybasins
    # outlets locations in projection of the input dataset
    print outlets
    outletsxx = outlets[0]
    outletsxxfloat = [float(x)+0.5 for x in outletsxx]
    outletsyy = outlets[1]
    outletsyyfloat = [float(x)+0.5 for x in outletsyy]
    """
    # The extra decimal digits is essentially a hack into
    # Grass GIS r.water.outlet routine, which only, works
    # with atleast 4 significant digits
    """
    print gtf
    # Convert pixel (row, col) positions to projected map coordinates
    # using the affine geotransform.
    outletsxxProj = float(gtf[0])+ \
                    float(gtf[1]) * np.array(outletsxxfloat)
    outletsyyProj = float(gtf[3])+ \
                    float(gtf[5])*np.array(outletsyy)
    return {'outlets':outlets, 'fac':nanDemArrayfac ,\
            'fdr':nanDemArrayfdr,\
            'outletsxxProj':outletsxxProj, 'outletsyyProj':outletsyyProj,\
            'bigbasins':nanDemArraybasins}
    # end of flow accumulation
# Skeleton by thresholding one grid measure e.g. flow or curvature
def compute_skeleton_by_single_threshold(inputArray, threshold):
    """Binary skeleton grid: 1.0 where inputArray exceeds threshold, else 0.0."""
    exceeds = inputArray > threshold
    return np.where(exceeds, 1.0, 0.0)
# Skeleton by thresholding two grid measures e.g. flow and curvature
def compute_skeleton_by_dual_threshold(inputArray1, inputArray2, threshold1, threshold2):
    """Binary skeleton grid: 1 only where both grids exceed their thresholds."""
    both_exceed = (inputArray1 > threshold1) & (inputArray2 > threshold2)
    return both_exceed.astype(int)
# Normalize curvature
def normalize(inputArray):
    """Rescale to [0, 1] using the finite (non-NaN) minimum and maximum."""
    finite = ~np.isnan(inputArray)
    shifted = inputArray - np.min(inputArray[finite])
    return shifted / np.max(shifted[finite])
def compute_discrete_geodesic_v1():
    """Trace discrete geodesics (drainage lines) with GRASS GIS r.drain.

    Workflow: initialize the GRASS session, import the previously written
    geodesic-distance GeoTIFF and the channel-head point shapefile into the
    GRASS location, run r.drain from the channel heads, thin and vectorize
    the resulting raster, then export the drainage lines to
    Parameters.drainagelineFileName as an ESRI shapefile.

    Relies on module-level globals: os, gsetup, g (GRASS scripting module)
    and the Parameters class. All results are side effects (GRASS maps and
    a shapefile on disk); nothing is returned.
    """
    # this a new version using r.drain to extract discrete goedesics
    gisbase = os.environ['GISBASE']
    gisdbdir = Parameters.gisdbdir
    locationGeonet = 'geonet'
    mapsetGeonet = 'geonetuser'
    print gsetup.init(gisbase, gisdbdir, locationGeonet, mapsetGeonet)
    # Read the filtered DEM
    print 'r.in.gdal'
    outfilepathgeodesic = Parameters.geonetResultsDir
    outfilenamegeodesic = Parameters.demFileName
    outfilenamegeodesic = outfilenamegeodesic.split('.')[0]+'_geodesicDistance.tif'
    # NOTE(review): hard-coded '\\' separator is Windows-only; os.path.join
    # would be portable — confirm the target platform.
    inputgeodesictifile = outfilepathgeodesic +'\\'+outfilenamegeodesic
    print 'importing goedesic tif: ',inputgeodesictifile
    print g.run_command('r.in.gdal', input=inputgeodesictifile, \
                        output=outfilenamegeodesic,overwrite=True)
    # The maximum number of points is 1024
    # --- have to add a check---
    # -- seems to run for large point shapefiles without fail.
    print 'importing channel heads shape file'
    channeheadsshapefileName = Parameters.pointshapefileName
    inputshapefilepath = Parameters.pointFileName
    print g.run_command('v.in.ogr',input = inputshapefilepath,\
                        layer=channeheadsshapefileName,output=channeheadsshapefileName,\
                        geometry='Point')
    print 'executing r.drain'
    print g.run_command('r.drain',input=outfilenamegeodesic,\
                        output='discretegeodesicsras',\
                        start_points=channeheadsshapefileName)
    print 'thining the discrete geodesic raster'
    print g.run_command('r.thin',input='discretegeodesicsras',\
                        output='discretegeodesicsrasthin')
    print 'converting the raster geodesic to vector map'
    print g.run_command('r.to.vect',input = 'discretegeodesicsrasthin',\
                        output='discretegeovec', type='line')
    print 'exporting the geodesics as shapefile'
    print g.run_command('v.out.ogr', input= 'discretegeovec',\
                        output=Parameters.drainagelineFileName,\
                        format='ESRI_Shapefile')
    print 'completed discrete geodesics'
    # ---draining algorithm finished
# Writing channel head shapefiles
def write_channel_heads(xx,yy):
    """Write detected channel-head pixels as an ESRI point shapefile.

    Parameters
    ----------
    xx, yy : array-like
        Pixel (column/row) indices of the channel heads; they are projected
        to map coordinates with the affine geotransform in
        Parameters.geotransform before writing.

    Side effects: overwrites Parameters.pointFileName on disk. Relies on
    module-level ogr, osr, os, np and the Parameters class.
    """
    print "Writing Channel Heads shapefile"
    # set up the shapefile driver
    driver = ogr.GetDriverByName(Parameters.driverName)
    # This will delete and assist in overwrite of the shape files
    if os.path.exists(Parameters.pointFileName):
        driver.DeleteDataSource(Parameters.pointFileName)
    # create the data source
    data_source = driver.CreateDataSource(Parameters.pointFileName)
    # create the spatial reference, same as the input dataset
    srs = osr.SpatialReference()
    gtf = Parameters.geotransform
    georef = Parameters.inputwktInfo
    srs.ImportFromWkt(georef)
    # Project the xx, and yy points (affine: origin + pixel_size * index)
    xxProj = float(gtf[0])+ \
        float(gtf[1]) * np.array(xx)
    yyProj = float(gtf[3])+ \
        float(gtf[5])*np.array(yy)
    # create the layer
    layer = data_source.CreateLayer(Parameters.pointshapefileName,\
        srs, ogr.wkbPoint)
    # Add the fields we're interested in
    field_name = ogr.FieldDefn("Name", ogr.OFTString)
    field_name.SetWidth(24)
    layer.CreateField(field_name)
    field_region = ogr.FieldDefn("Region", ogr.OFTString)
    field_region.SetWidth(24)
    layer.CreateField(field_region)
    layer.CreateField(ogr.FieldDefn("Latitude", ogr.OFTReal))
    layer.CreateField(ogr.FieldDefn("Longitude", ogr.OFTReal))
    # Now add the channel heads as features to the layer
    for i in xrange(0,len(xxProj)):
        # create the feature
        feature = ogr.Feature(layer.GetLayerDefn())
        # Set the attributes using the values
        feature.SetField("Name", 'ChannelHead')
        feature.SetField("Region", Parameters.Region)
        # NOTE(review): "Latitude" is filled from xxProj (the x/easting axis)
        # and "Longitude" from yyProj (y/northing) — the attribute names look
        # swapped relative to the usual x=longitude convention; confirm intent.
        feature.SetField("Latitude", xxProj[i])
        feature.SetField("Longitude", yyProj[i])
        # create the WKT for the feature using Python string formatting
        wkt = "POINT(%f %f)" % (float(xxProj[i]) , float(yyProj[i]))
        # Create the point from the Well Known Txt
        point = ogr.CreateGeometryFromWkt(wkt)
        # Set the feature geometry using the point
        feature.SetGeometry(point)
        # Create the feature in the layer (shapefile)
        layer.CreateFeature(feature)
        # Destroy the feature to free resources
        feature.Destroy()
    # Destroy the data source to free resources
    data_source.Destroy()
# Writing drainage paths as shapefile
def write_drainage_paths(geodesicPathsCellList):
print 'Writing drainage paths'
#print geodesicPathsCellList
# set up the shapefile driver
driver = ogr.GetDriverByName(Parameters.driverName)
# This will delete and assist in overwrite of the shape files
if os.path.exists(Parameters.drainagelineFileName):
driver.DeleteDataSource(Parameters.drainagelineFileName)
# create the data source
data_source = driver.CreateDataSource(Parameters.drainagelineFileName)
# create the spatial reference, same as the input dataset
srs = osr.SpatialReference()
gtf = Parameters.geotransform
georef = Parameters.inputwktInfo
srs.ImportFromWkt(georef)
# create the layer
layer = data_source.CreateLayer(Parameters.drainagelinefileName,\
srs, ogr.wkbLineString)
# Add the fields we're interested in
field_name = ogr.FieldDefn("Name", ogr.OFTString)
field_name.SetWidth(24)
layer.CreateField(field_name)
field_region = ogr.FieldDefn("Region", ogr.OFTString)
field_region.SetWidth(24)
layer.CreateField(field_region)
layer.CreateField(ogr.FieldDefn("Latitude", ogr.OFTReal))
layer.CreateField(ogr.FieldDefn("Longitude", ogr.OFTReal))
# Now add the channel heads as features to the layer
print len(geodesicPathsCellList)
for i in xrange(0,len(geodesicPathsCellList)):
#print geodesicPathsCellList[i]
# Project the linepoints to appropriate projection
xx = geodesicPathsCellList[i][0]
yy = geodesicPathsCellList[i][1]
# Project the xx, and yy points
xxProj = float(gtf[0])+ \
float(gtf[1]) * np.array(xx)
yyProj = float(gtf[3])+ \
float(gtf[5])*np.array(yy)
# create the feature
feature = ogr.Feature(layer.GetLayerDefn())
# Set the attributes using the values
feature.SetField("Name", 'ChannelNetwork')
feature.SetField("Region", Parameters.Region)
# create the WKT for the feature using Python string formatting
line = ogr.Geometry(ogr.wkbLineString)
for j in xrange(0,len(xxProj)):
#print xxProj[j],yyProj[j]
line.AddPoint(xxProj[j],yyProj[j])
#print line
# Create the point from the Well Known Txt
#lineobject = line.ExportToWkt()
# Set the feature geometry using the point
feature.SetGeometryDirectly(line)
# Create the feature in the layer (shapefile)
layer.CreateFeature(feature)
# Destroy the feature to free resources
feature.Destroy()
# Destroy the data source to free resources
data_source.Destroy()
#---------------------------------------------------------------------------------
#------------------- MAIN FUNCTION--------------------------------------------------
#---------------------------------------------------------------------------------
def main():
    """Run the full pyGeoNet channel-extraction pipeline.

    Stages: read the DEM -> Perona-Malik nonlinear filtering -> slope and
    curvature statistics -> GRASS-based flow accumulation / basin indexing
    -> flow+curvature skeleton -> per-basin fast marching (skfmm) to build
    a geodesic-distance array -> skeleton end-point (channel head)
    detection -> shapefile export and discrete geodesic extraction.

    Operates entirely through module-level globals (np, plt, cm,
    Parameters, defaults, mquantiles, ndimage, skfmm and the helper
    functions in this file); produces plots and GeoTIFF/shapefile outputs
    as side effects and returns nothing.
    """
    print "current working directory", os.getcwd()
    print "Reading input file path :",Parameters.demDataFilePath
    print "Reading input file :",Parameters.demFileName
    defaults.figureNumber = 0
    rawDemArray = read_dem_from_geotiff(Parameters.demFileName,\
                                        Parameters.demDataFilePath)
    nanDemArraylr=np.array(rawDemArray)
    nanDemArray = nanDemArraylr
    nanDemArray[nanDemArray < defaults.demNanFlag]= np.nan
    Parameters.minDemValue= np.min(nanDemArray[:])
    Parameters.maxDemValue= np.max(nanDemArray[:])
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(nanDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Input DEM')
    if defaults.doPlot==1:
        plt.show()
    # Area of analysis
    Parameters.xDemSize=np.size(nanDemArray,0)
    Parameters.yDemSize=np.size(nanDemArray,1)
    # Calculate pixel length scale and assume square
    Parameters.maxLowerLeftCoord = np.max([Parameters.xDemSize, \
                                           Parameters.yDemSize])
    print 'DTM size: ',Parameters.xDemSize, 'x' ,Parameters.yDemSize
    #-----------------------------------------------------------------------------
    # Compute slope magnitude for raw and filtered DEMs
    print 'Computing slope of raw DTM'
    print 'DEM pixel scale:',Parameters.demPixelScale
    print np.array(nanDemArray).shape
    slopeXArray,slopeYArray = np.gradient(np.array(nanDemArray),\
                                          Parameters.demPixelScale)
    slopeMagnitudeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)
    # plot the slope DEM array
    slopeMagnitudeDemArrayNp = np.array(slopeMagnitudeDemArray)
    print slopeMagnitudeDemArrayNp.shape
    # plotting the slope DEM of non filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(slopeMagnitudeDemArrayNp,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Slope of unfiltered DEM')
    if defaults.doPlot==1:
        plt.show()
    # Computation of the threshold lambda used in Perona-Malik nonlinear
    # filtering. The value of lambda (=edgeThresholdValue) is given by the 90th
    # quantile of the absolute value of the gradient.
    print'Computing lambda = q-q-based nonlinear filtering threshold'
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayNp
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print 'dem smoothing Quantile',defaults.demSmoothingQuantile
    edgeThresholdValuescipy = mquantiles(np.absolute(slopeMagnitudeDemArrayQ),\
                                         defaults.demSmoothingQuantile)
    print 'edgeThresholdValuescipy :', edgeThresholdValuescipy
    # performing PM filtering using the anisodiff
    print 'Performing Perona-Malik nonlinear filtering'
    filteredDemArray = anisodiff(nanDemArray, defaults.nFilterIterations, \
                                 edgeThresholdValuescipy,\
                                 defaults.diffusionTimeIncrement, \
                                 (Parameters.demPixelScale,\
                                  Parameters.demPixelScale),2)
    # plotting the filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(filteredDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Filtered DEM')
    if defaults.doPlot==1:
        plt.show()
    # Writing the filtered DEM as a tif
    write_geotif_filteredDEM(filteredDemArray,Parameters.demDataFilePath,\
                             Parameters.demFileName)
    # Computing slope of filtered DEM
    print 'Computing slope of filtered DTM'
    filteredDemArraynp = filteredDemArray#np.gradient only takes an array as input
    slopeXArray,slopeYArray = np.gradient(filteredDemArraynp,Parameters.demPixelScale)
    slopeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)
    slopeMagnitudeDemArrayQ = slopeDemArray
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print ' angle min:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,0.1))*180/np.pi
    print ' angle max:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,99.9))*180/np.pi
    print 'mean slope:',np.nanmean(slopeDemArray[:])
    print 'stdev slope:',np.nanstd(slopeDemArray[:])
    #Computing curvature
    print 'computing curvature'
    curvatureDemArrayIn= filteredDemArraynp
    #curvatureDemArrayIn[curvatureDemArrayIn== defaults.demErrorFlag]=np.nan
    curvatureDemArray = compute_dem_curvature(curvatureDemArrayIn,\
                                              Parameters.demPixelScale,\
                                              defaults.curvatureCalcMethod)
    #Writing the curvature array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_curvature.tif'
    write_geotif_generic(curvatureDemArray,outfilepath,outfilename)
    #Computation of statistics of curvature
    print 'Computing curvature statistics'
    print curvatureDemArray.shape
    tt = curvatureDemArray[~np.isnan(curvatureDemArray[:])]
    print tt.shape
    finiteCurvatureDemList = curvatureDemArray[np.isfinite(curvatureDemArray[:])]
    print finiteCurvatureDemList.shape
    curvatureDemMean = np.nanmean(finiteCurvatureDemList)
    curvatureDemStdDevn = np.nanstd(finiteCurvatureDemList)
    print ' mean: ', curvatureDemMean
    print ' standard deviation: ', curvatureDemStdDevn
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature DEM')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    #*************************************************
    #Compute curvature quantile-quantile curve
    # This seems to take a long time ... is commented for now
    print 'Computing curvature quantile-quantile curve'
    #osm,osr = compute_quantile_quantile_curve(finiteCurvatureDemList)
    #print osm[0]
    #print osr[0]
    thresholdCurvatureQQxx = 1
    # have to add method to automatically compute the thresold
    # .....
    # .....
    #*************************************************
    # Computing contributing areas
    print 'Computing upstream accumulation areas using MFD from GRASS GIS'
    """
    return {'outlets':outlets, 'fac':nanDemArrayfac ,\
    'fdr':nanDemArrayfdr ,'basins':nanDemArraybasins,\
    'outletsxxProj':outletsxxProj, 'outletsyyProj':outletsyyProj,\
    'bigbasins':allbasins}
    """
    # Call the flow accumulation function
    flowroutingresults = flowaccumulation(filteredDemArray)
    # Read out the flowroutingresults into appropriate variables
    outletPointsList = flowroutingresults['outlets']
    flowArray = flowroutingresults['fac']
    flowDirectionsArray = flowroutingresults['fdr']
    # These are actually not sub basins, if the basin threshold
    # is large, then you might have as nulls, so best
    # practice is to keep the basin threshold close to 1000
    # default value is 10,000
    #subBasinIndexArray = flowroutingresults['basins']
    #subBasinIndexArray[subBasinIndexArray==-9999]=np.nan
    basinIndexArray = flowroutingresults['bigbasins']
    flowArray[np.isnan(filteredDemArray)]=np.nan
    flowMean = np.mean(flowArray[~np.isnan(flowArray[:])])
    print 'Mean upstream flow: ', flowMean
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    drainageMeasure = -np.sqrt(np.log10(flowArray))
    plt.imshow(drainageMeasure,cmap=cm.coolwarm)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('flowArray with outlets')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(basinIndexArray.T,cmap=cm.Dark2)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('basinIndexArray with outlets')
    if defaults.doPlot==1:
        plt.show()
    # Define a skeleton based on flow alone
    skeletonFromFlowArray = \
        compute_skeleton_by_single_threshold(flowArray.T,\
        defaults.flowThresholdForSkeleton)
    # Define a skeleton based on curvature alone
    skeletonFromCurvatureArray =\
        compute_skeleton_by_single_threshold(curvatureDemArray.T,\
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn)
    # Define a skeleton based on curvature and flow
    skeletonFromFlowAndCurvatureArray =\
        compute_skeleton_by_dual_threshold(curvatureDemArray.T, flowArray.T, \
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn, \
        defaults.flowThresholdForSkeleton)
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature with outlets')
    if defaults.doPlot==1:
        plt.show()
    # Writing the skeletonFromFlowAndCurvatureArray array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_skeleton.tif'
    write_geotif_generic(skeletonFromFlowAndCurvatureArray.T,\
                         outfilepath,outfilename)
    # Computing the percentage drainage areas
    print 'Computing percentage drainage area of each indexed basin'
    fastMarchingStartPointList = np.array(outletPointsList)
    print fastMarchingStartPointList
    #fastMarchingStartPointListFMM = np.zeros((fastMarchingStartPointList.shape))
    fastMarchingStartPointListFMMx = []
    fastMarchingStartPointListFMMy = []
    basinsUsedIndexList = np.zeros((len(fastMarchingStartPointList[0]),1))
    nx = Parameters.xDemSize
    ny = Parameters.yDemSize
    nDempixels = float(nx*ny)
    basinIndexArray = basinIndexArray.T
    for label in range(0,len(fastMarchingStartPointList[0])):
        outletbasinIndex = basinIndexArray[fastMarchingStartPointList[0,label],\
                                           fastMarchingStartPointList[1,label]]
        print outletbasinIndex
        numelments = basinIndexArray[basinIndexArray==outletbasinIndex]
        #print type(numelments), len(numelments)
        percentBasinArea = float(len(numelments)) * 100/nDempixels
        print 'Basin: ',outletbasinIndex,\
              '@ : ',fastMarchingStartPointList[:,label],' #Elements ',len(numelments),\
              ' area ',percentBasinArea,' %'
        # Keep only basins that are large enough (by area share and cell count)
        if percentBasinArea > defaults.thresholdPercentAreaForDelineation and\
           len(numelments) > Parameters.numBasinsElements:
            # Get the watersheds used
            basinsUsedIndexList[label]= label
            # Preparing the outlets used for fast marching in ROI
            #fastMarchingStartPointListFMM[:,label] = fastMarchingStartPointList[:,label]
            fastMarchingStartPointListFMMx.append(fastMarchingStartPointList[0,label])
            fastMarchingStartPointListFMMy.append(fastMarchingStartPointList[1,label])
        # finishing Making outlets for FMM
    #Closing Basin area computation
    fastMarchingStartPointListFMM = np.array([fastMarchingStartPointListFMMx,\
                                              fastMarchingStartPointListFMMy])
    # Computing the local cost function
    print 'Preparing to calculate cost function'
    # lets normalize the curvature first
    if defaults.doNormalizeCurvature ==1:
        curvatureDemArrayNor = normalize(curvatureDemArray)
        del curvatureDemArray
        curvatureDemArray = curvatureDemArrayNor
        del curvatureDemArrayNor
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    # NOTE(review): duplicate plt.figure call below — harmless but likely unintended.
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.title('Curvature after normalization')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    print 'Curvature min: ' ,str(np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])), \
          ' exp(min): ',str(np.exp(3*np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    print 'Curvature max: ' ,str(np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])),\
          ' exp(max): ',str(np.exp(3*np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    # set all the nan's to zeros before cost function is computed
    curvatureDemArray[np.isnan(curvatureDemArray)] = 0
    print 'Computing cost function & geodesic distance'
    # Calculate the local reciprocal cost (weight, or propagation speed in the
    # eikonal equation sense). If the cost function isn't defined, default to
    # old cost function.
    flowArray = flowArray.T
    curvatureDemArray = curvatureDemArray.T
    if hasattr(defaults, 'reciprocalLocalCostFn'):
        print 'Evaluating local cost func.'
        # NOTE(review): eval() executes a config-supplied expression against
        # local variables — only safe if defaults.reciprocalLocalCostFn is trusted.
        reciprocalLocalCostArray = eval(defaults.reciprocalLocalCostFn)
    else:
        print 'Evaluating local cost func. (default)'
        reciprocalLocalCostArray = flowArray + \
            (flowMean*skeletonFromFlowAndCurvatureArray)\
            + (flowMean*curvatureDemArray)
    # NOTE(review): the array computed above is discarded and unconditionally
    # recomputed with the default formula below, which makes the eval branch
    # a no-op — confirm whether this override is intentional.
    del reciprocalLocalCostArray
    # Forcing the evaluations
    reciprocalLocalCostArray = flowArray + \
        (flowMean*skeletonFromFlowAndCurvatureArray)\
        + (flowMean*curvatureDemArray)
    if hasattr(defaults,'reciprocalLocalCostMinimum'):
        if defaults.reciprocalLocalCostMinimum != 'nan':
            reciprocalLocalCostArray[reciprocalLocalCostArray[:]\
                                     < defaults.reciprocalLocalCostMinimum]=1.0
    print '1/cost min: ', np.nanmin(reciprocalLocalCostArray[:])
    print '1/cost max: ', np.nanmax(reciprocalLocalCostArray[:])
    # Writing the reciprocal array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_costfunction.tif'
    write_geotif_generic(reciprocalLocalCostArray,outfilepath,outfilename)
    # Fast marching
    print 'Performing fast marching'
    print '# of unique basins:',np.size(np.unique(basinIndexArray))
    # Now access each unique basin and get the
    # outlets for it
    basinIndexList = np.unique(basinIndexArray)
    print 'basinIndexList:', str(basinIndexList)
    print reciprocalLocalCostArray.shape
    #stop
    # Do fast marching for each sub basin
    geodesicDistanceArray = np.zeros((basinIndexArray.shape))
    geodesicDistanceArray[geodesicDistanceArray==0]=np.Inf
    geodesicDistanceArray = geodesicDistanceArray.T
    filteredDemArrayTr = filteredDemArray.T
    basinIndexArray = basinIndexArray.T
    # create a watershed outlet dictionary
    outletwatersheddict = {}
    defaults.figureNumber = defaults.figureNumber + 1
    for i in range(0,len(fastMarchingStartPointListFMM[0])):
        basinIndexList = basinIndexArray[fastMarchingStartPointListFMM[1,i],\
                                         fastMarchingStartPointListFMM[0,i]]
        print 'basin Index:',basinIndexList
        print 'start point :', fastMarchingStartPointListFMM[:,i]
        outletwatersheddict[basinIndexList]=fastMarchingStartPointListFMM[:,i]
        maskedBasin = np.zeros((basinIndexArray.shape))
        maskedBasin[basinIndexArray==basinIndexList]=1
        # For the masked basin get the maximum accumulation are
        # location and use that as an outlet for the basin.
        maskedBasinFAC = np.zeros((basinIndexArray.shape))
        maskedBasinFAC[basinIndexArray==basinIndexList]=\
            flowArray[basinIndexArray==basinIndexList]
        maskedBasinFAC[maskedBasinFAC==0]=np.nan
        # Get the outlet of subbasin
        maskedBasinFAC[np.isnan(maskedBasinFAC)]=0
        # print subBasinoutletindices
        # outlets locations in projection of the input dataset
        outletsxx = fastMarchingStartPointList[0,i]
        outletsyy = fastMarchingStartPointList[1,i]
        # call the fast marching here
        phi = np.nan * np.ones((reciprocalLocalCostArray.shape))
        speed = np.ones((reciprocalLocalCostArray.shape))* np.nan
        phi[maskedBasinFAC!=0] = 1
        speed[maskedBasinFAC!=0] = reciprocalLocalCostArray[maskedBasinFAC!=0]
        phi[fastMarchingStartPointListFMM[1,i],\
            fastMarchingStartPointListFMM[0,i]] =-1
        try:
            travelTimearray = skfmm.travel_time(phi,speed, dx=1)
        except IOError as e:
            print 'Error in calculating skfmm travel time'
            print 'Error in catchment: ',basinIndexList
            # setting travel time to empty array
            travelTimearray = np.nan * np.zeros((reciprocalLocalCostArray.shape))
            # Diagnostic plots for the failing catchment (shown unconditionally)
            plt.figure(defaults.figureNumber+1)
            plt.imshow(speed.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                     fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('speed basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            plt.figure(defaults.figureNumber+1)
            plt.imshow(phi.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                     fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('phi basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
        #stop
        #print travelTimearray.shape
        geodesicDistanceArray[maskedBasin ==1]= travelTimearray[maskedBasin ==1]
    #-----------------------------------
    #-----------------------------------
    # Plot the geodesic array
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log10(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.contour(geodesicDistanceArray.T,140,cmap=cm.coolwarm)
    plt.title('Geodesic distance array (travel time)')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    print geodesicDistanceArray.shape
    # Writing the geodesic distance array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_geodesicDistance.tif'
    write_geotif_generic(geodesicDistanceArray.T,outfilepath,outfilename)
    # Locating end points
    print 'Locating skeleton end points'
    xySkeletonSize = skeletonFromFlowAndCurvatureArray.shape
    skeletonLabeledArray, skeletonNumConnectedComponentsList =\
        ndimage.label(skeletonFromFlowAndCurvatureArray)
    #print skeletonNumConnectedComponentsList
    """
    Through the histogram of skeletonNumElementsSortedList
    (skeletonNumElementsList minus the maximum value which
    corresponds to the largest connected element of the skeleton) we get the
    size of the smallest elements of the skeleton, which will likely
    correspond to small isolated convergent areas. These elements will be
    excluded from the search of end points.
    """
    print 'Counting the number of elements of each connected component'
    #print "ndimage.labeled_comprehension"
    lbls = np.arange(1, skeletonNumConnectedComponentsList+1)
    skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
                                                                 skeletonLabeledArray,\
                                                                 lbls,np.count_nonzero,\
                                                                 int,0)
    skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
    print np.sqrt(len(skeletonNumElementsSortedList))
    histarray,skeletonNumElementsHistogramX=np.histogram(\
        skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
        np.sqrt(len(skeletonNumElementsSortedList)))
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonLabeledArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Labeled Array elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    # Create skeleton gridded array
    skeletonNumElementsGriddedArray = np.zeros(xySkeletonSize)
    #"""
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #Gets the watershed label for this specified cell and checked in
            #subsequent if statement
            basinIndex = basinIndexArray[i,j]
            if skeletonLabeledArray[i, j] > 0:
                skeletonNumElementsGriddedArray[i,j] = \
                    skeletonLabeledArrayNumtuple[skeletonLabeledArray[i,j]-1]
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonNumElementsGriddedArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Num elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    #"""
    # Elements smaller than skeletonNumElementsThreshold are not considered in the
    # skeletonEndPointsList detection
    print skeletonNumElementsHistogramX
    skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
    print 'skeletonNumElementsThreshold',str(skeletonNumElementsThreshold)
    # Scan the array for finding the channel heads
    print 'Continuing to locate skeleton endpoints'
    #"""
    skeletonEndPointsList = []
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #print i,j
            # Skip this pixel if the current point is not a labeled or if the
            # number of connected skeleton elements is too small
            if skeletonLabeledArray[i,j]!=0 \
               and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
                # Define search box and ensure it fits within the DTM bounds
                mx = i-1
                px = xySkeletonSize[0]-i
                my = j-1
                py = xySkeletonSize[1]-j
                xMinus = np.min([defaults.endPointSearchBoxSize, mx])
                xPlus = np.min([defaults.endPointSearchBoxSize, px])
                yMinus = np.min([defaults.endPointSearchBoxSize, my])
                yPlus = np.min([defaults.endPointSearchBoxSize, py])
                # Extract the geodesic distances geodesicDistanceArray for pixels within the search box
                searchGeodesicDistanceBox = geodesicDistanceArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Extract the skeleton labels for pixels within the search box
                searchLabeledSkeletonBox = skeletonLabeledArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Look in the search box for skeleton points with the same label
                # and greater geodesic distance than the current pixel at (i,j)
                # - if there are none, then add the current point as a channel head
                v = searchLabeledSkeletonBox==skeletonLabeledArray[i,j]
                # NOTE(review): precedence — this evaluates as
                # (v * searchGeodesicDistanceBox) > geodesicDistanceArray[i,j],
                # not v * (box > value); confirm which was intended.
                v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i,j]
                v3 = np.where(np.any(v1==True,axis=0))
                if len(v3[0])==0:
                    skeletonEndPointsList.append([i,j])
    # For loop ends here
    skeletonEndPointsListArray = np.array(skeletonEndPointsList)
    xx = skeletonEndPointsListArray[0:len(skeletonEndPointsListArray),0]
    yy = skeletonEndPointsListArray[0:len(skeletonEndPointsListArray),1]
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(xx,yy,'or')
    plt.title('Skeleton Num elements Array with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.plot(xx,yy,'or')
    plt.title('Geodesic distance Array with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    # Write shapefiles of channel heads
    write_channel_heads(xx,yy)
    # Do compute discrete geodesics
    print 'Computing discrete geodesics'
    compute_discrete_geodesic_v1()
    print 'Finished pyGeoNet'
# Script entry point: run the full pipeline and report wall-clock time.
if __name__ == '__main__':
    t0 = clock()
    main()
    t1 = clock()
    print "time taken to complete the script is::",t1-t0," seconds"
    #plt.show()
    print "script complete"
| gpl-3.0 |
0x0all/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn import cluster

n_clusters = 5
np.random.seed(0)

try:
    lena = sp.lena()
except AttributeError:
    # Newer versions of scipy have lena in misc
    # NOTE(review): scipy >= 1.0 removed lena() from scipy.misc entirely, so
    # this fallback also fails there — confirm the supported scipy version.
    from scipy import misc
    lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array

# Quantize the 256 gray levels down to n_clusters values via k-means.
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_

# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape

vmin = lena.min()
vmax = lena.max()

# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)

# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# equal bins lena: quantize with n_clusters equally spaced bins for comparison
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# histogram of gray levels with both sets of bin boundaries overlaid
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b')

for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')

plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
robintw/scikit-image | doc/examples/plot_line_hough_transform.py | 14 | 4465 | r"""
=============================
Straight line Hough transform
=============================
The Hough transform in its simplest form is a `method to detect straight lines
<http://en.wikipedia.org/wiki/Hough_transform>`__.
In the following example, we construct an image with a line intersection. We
then use the Hough transform to explore a parameter space for straight lines
that may run through the image.
Algorithm overview
------------------
Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept `c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we therefore construct a segment
perpendicular to the line, leading to the origin. The line is represented by the
length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\theta`.
The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \times N` matrix, for :math:`M` different values of the
radius and :math:`N` different values of :math:`\theta`). For each parameter
combination, :math:`r` and :math:`\theta`, we then find the number of non-zero
pixels in the input image that would fall close to the corresponding line, and
increment the array at position :math:`(r, \theta)` appropriately.
We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicate the parameters of the most
probable lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.
Another approach is the Progressive Probabilistic Hough Transform [1]_. It is
based on the assumption that using a random subset of voting points give a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the beginning
and end of each line segment, which is useful.
The function `probabilistic_hough` has three parameters: a general threshold
that is applied to the Hough accumulator, a minimum line length and the line gap
that influences line merging. In the example below, we find lines longer than 10
with a gap less than 3 pixels.
References
----------
.. [1] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
.. [2] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
pp. 11-15 (January, 1972)
"""
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
# Construct a test image containing two crossing diagonal lines.
image = np.zeros((100, 100))
# Classic straight-line Hough transform
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = hough_line(image)
fig, ax = plt.subplots(1, 3, figsize=(8, 4))
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
# Show the accumulator on a log scale so weak peaks remain visible.
ax[1].imshow(np.log(1 + h),
             extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
                     d[-1], d[0]],
             cmap=plt.cm.gray, aspect=1/1.5)
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
ax[2].imshow(image, cmap=plt.cm.gray)
rows, cols = image.shape
# Convert each (distance, angle) peak back to a line segment y = f(x).
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
    y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
    y1 = (dist - cols * np.cos(angle)) / np.sin(angle)
    ax[2].plot((0, cols), (y0, y1), '-r')
ax[2].axis((0, cols, rows, 0))
ax[2].set_title('Detected lines')
ax[2].axis('image')
# Line finding on a real photo, using the Probabilistic Hough Transform.
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5, line_gap=3)
fig2, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(edges, cmap=plt.cm.gray)
ax[1].set_title('Canny edges')
ax[1].axis('image')
ax[2].imshow(edges * 0)
for line in lines:
    p0, p1 = line
    ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_title('Probabilistic Hough')
ax[2].axis('image')
plt.show()
| bsd-3-clause |
EvenStrangest/tensorflow | tensorflow/examples/skflow/iris_custom_decay_dnn.py | 3 | 1749 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
# Hold out 20% of the iris data for evaluation; fixed seed for reproducibility.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                    iris.target,
                                                    test_size=0.2,
                                                    random_state=42)
# setup exponential decay function
def exp_decay(global_step):
    """Exponentially decaying learning-rate schedule evaluated at *global_step*."""
    base_rate = 0.1
    return tf.train.exponential_decay(learning_rate=base_rate,
                                      global_step=global_step,
                                      decay_steps=100,
                                      decay_rate=0.001)
# Plug the custom decay schedule into the optimizer as its learning rate.
optimizer = tf.train.AdagradOptimizer(learning_rate=exp_decay)
classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            optimizer=optimizer)
classifier.fit(X_train, y_train, steps=800)
# Held-out accuracy of the trained classifier.
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
| apache-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/axes_grid/scatter_hist.py | 8 | 1582 | import numpy as np
import matplotlib.pyplot as plt
# the random data
# Random 2-D point cloud used for the scatter plot and marginal histograms.
x = np.random.randn(1000)
y = np.random.randn(1000)
fig = plt.figure(1, figsize=(5.5,5.5))
from mpl_toolkits.axes_grid1 import make_axes_locatable
# the scatter plot:
axScatter = plt.subplot(111)
axScatter.scatter(x, y)
axScatter.set_aspect(1.)
# Create new axes on the right and on the top of the current axes.
# The numeric argument is the height/width of the new axes in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# Hide the tick labels that would duplicate the scatter plot's.
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(),
         visible=False)
# Determine symmetric histogram limits as a whole number of bin widths.
binwidth = 0.25
xymax = np.max( [np.max(np.fabs(x)), np.max(np.fabs(y))] )
lim = ( int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# The x axis of axHistx and y axis of axHisty are shared with axScatter,
# so their limits need no manual adjustment.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
    tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
    tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
plt.draw()
plt.show()
| gpl-2.0 |
dsquareindia/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 79 | 2497 | # Author: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from sklearn.utils.testing import assert_equal
# Module-level fixtures shared by all tests: the iris data in dense and
# CSR form, plus per-sample weights 0..n-1.
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
    """Assert two CSR matrices are identical once explicit zeros are removed."""
    X.eliminate_zeros()
    Y.eliminate_zeros()
    for axis in (0, 1):
        assert_equal(X.shape[axis], Y.shape[axis])
    for attr in ('data', 'indices', 'indptr'):
        assert_array_equal(getattr(X, attr), getattr(Y, attr))
def test_seq_dataset():
    """Dense and sparse SeqDataset variants must yield identical samples."""
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)

    def _check(sample):
        # Each sample is (row data, target, weight, row index).
        xi_data, yi, swi, idx = sample
        xi = sp.csr_matrix((xi_data), shape=(1, X.shape[1]))
        assert_csr_equal(xi, X_csr[idx])
        assert_equal(yi, y[idx])
        assert_equal(swi, sample_weight[idx])

    for dataset in (dense, sparse):
        for _ in range(5):
            _check(dataset._next_py())    # sequential sample
            _check(dataset._random_py())  # random sample
def test_seq_dataset_shuffle():
    """Shuffling with the same seed must keep dense/sparse datasets in sync."""
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)
    # Before shuffling, sequential iteration visits samples in order.
    for expected in range(5):
        _, _, _, i_dense = dense._next_py()
        _, _, _, i_sparse = sparse._next_py()
        assert_equal(i_dense, expected)
        assert_equal(i_sparse, expected)
    # Random draws share the seed, hence the same index sequence.
    for _ in range(5):
        _, _, _, i_dense = dense._random_py()
        _, _, _, i_sparse = sparse._random_py()
        assert_equal(i_dense, i_sparse)
    # After an identical shuffle both datasets must stay synchronised.
    seed = 77
    dense._shuffle_py(seed)
    sparse._shuffle_py(seed)
    for _ in range(5):
        _, _, _, i_dense = dense._next_py()
        _, _, _, i_sparse = sparse._next_py()
        assert_equal(i_dense, i_sparse)
        _, _, _, i_dense = dense._random_py()
        _, _, _, i_sparse = sparse._random_py()
        assert_equal(i_dense, i_sparse)
| bsd-3-clause |
r-mart/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    """_jaccard on hand-crafted boolean bicluster masks."""
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    # (other mask, expected Jaccard index against a1)
    cases = [(a1, 1), (a2, 0.25), (a3, 1.0 / 7), (a4, 0)]
    for other, expected in cases:
        assert_equal(_jaccard(a1, a1, other, other), expected)
def test_consensus_score():
    """Matching biclusterings score 1; mismatched row/column pairs score 0."""
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    # Same biclustering (possibly with rows/columns swapped consistently).
    for left, right in (((a, a), (a, a)), ((a, a), (b, b)),
                        ((a, b), (a, b)), ((a, b), (b, a))):
        assert_equal(consensus_score(left, right), 1)
    # Rows and columns paired inconsistently.
    for left, right in (((a, a), (b, a)), ((a, a), (a, b)),
                        ((b, b), (a, b)), ((b, b), (b, a))):
        assert_equal(consensus_score(left, right), 0)
def test_consensus_score_issue2445():
    """consensus_score must cope with different bicluster counts in A and B."""
    rows = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    cols = rows.copy()
    keep = [0, 2]
    score = consensus_score((rows, cols), (rows[keep], cols[keep]))
    # B contains 2 of the 3 biclusters in A, so the score should be 2/3.
    assert_almost_equal(score, 2.0 / 3.0)
| bsd-3-clause |
plissonf/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw the decision surface of *classifier* on *axis*.

    Marker sizes are scaled by ``sample_weight`` so heavily weighted points
    stand out.  Reads the module-level training data ``X`` and ``Y``.
    """
    grid = np.linspace(-4, 5, 500)
    xx, yy = np.meshgrid(grid, grid)
    samples = np.c_[xx.ravel(), yy.ravel()]
    Z = classifier.decision_function(samples).reshape(xx.shape)
    # Filled contours for the decision values, then the training points.
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight,
                 alpha=0.9, cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# Create 20 points: 10 shifted to (1, 1) with label +1, 10 at the origin
# with label -1.
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# Give much bigger weights to a few outliers to make the effect visible.
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# Fit one SVM with the modified weights and one reference SVM without.
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
mainyanim/eyetoai | findings/jsonify.py | 1 | 13398 | import pandas as pd
import random
import json
from openpyxl import load_workbook
import pymongo
# define values check and append to arr
# define probability array
# read excel
# Module-level spreadsheet handles: `df` is used by most helpers below;
# `wb`/`ws` open the same workbook through openpyxl.
df = pd.read_excel("output.xlsx")
wb = load_workbook('output.xlsx')
ws = wb.get_sheet_by_name('Sheet1')  # Define worksheet
def get_dic_from_two_lists(keys, values):
    """Zip *keys* and *values* into a dict.

    Silently returns ``None`` when ``values`` is shorter than ``keys``,
    mirroring the original best-effort behaviour.
    """
    try:
        return {key: values[i] for i, key in enumerate(keys)}
    except IndexError:
        pass
# Define function to normalize arr values
def normalize(items):
    """Scale *items* so that they sum to 1 and return the result.

    Bug fix: the original computed the normalized list but never returned
    it, so every call was a no-op.  Also hoists ``sum(items)`` out of the
    comprehension (it was recomputed for every element).

    Parameters
    ----------
    items : sequence of numbers
        Values to normalise.

    Returns
    -------
    list of float
        ``items`` divided by their total; ``[]`` for empty input.
    """
    if not items:
        return []
    total = sum(items)
    return [x / total for x in items]
# def probslist
def concatvals(row, start, stop):
    """Draw one random parameter value for spreadsheet row *row*.

    Columns ``start:stop`` of the module-level ``df`` hold comma-separated
    candidate values; the corresponding column headers are used as sampling
    weights.  NOTE(review): assumes the headers in that slice are numeric
    probabilities -- confirm against the spreadsheet layout.
    """
    # Column headers of the slice act as per-column weights.
    prob_head = list(df)[start:stop]
    width = stop - start
    col = start
    val_arr = []
    prob_arr = []
    for i in range(width):
        # row - 2 converts the 1-based spreadsheet row (plus header row)
        # to a 0-based iloc index.
        value_temp = df.iloc[row - 2, col]
        # Empty cells read back as float NaN; anything else is a string.
        if isinstance(value_temp, float) is False:
            value = [x.strip() for x in value_temp.split(',')]
            len_val = len(value)
            # Every candidate from this column shares the column's weight.
            prob_arr += [prob_head[i] for _ in range(len_val)]
            val_arr += value[0:len_val]
        col += 1
    # Weighted draw of a single value from all collected candidates.
    randparameter = (", ".join(random.choices(val_arr, prob_arr, k=1)))
    return randparameter
def grab_data(r, s, x, y):
    """Return a one-element list holding a random draw for row ``r + s``."""
    return [concatvals(r + s, x, y)]
def create_rep(arr, row_data, fname, modality):
    """Build a ``{parameter: value}`` dict for finding *fname* under *modality*.

    For every parameter name in *arr*, one value is sampled from the
    spreadsheet (columns 14:19), starting at a row offset that depends on
    the (finding, modality) pair.  The fifteen near-identical ``if/elif``
    branches of the original are replaced by a lookup table; behaviour is
    unchanged, including the IndexError tolerance that only the
    ('mass', 'Mammography') branch had.

    Parameters
    ----------
    arr : list of str
        Parameter names used as dict keys.
    row_data : int
        Base spreadsheet row.
    fname : str
        camelCased finding name.
    modality : str
        'Mammography', 'US' or 'MRI'.

    Returns
    -------
    dict or None
        Mapping of parameter name to sampled value, or ``None`` when fewer
        values than keys were collected (see ``get_dic_from_two_lists``).
    """
    # Spreadsheet row offset for every supported (finding, modality) pair.
    row_offsets = {
        ('mass', 'Mammography'): 0,
        ('calcifications', 'Mammography'): 3,
        ('assymetry', 'Mammography'): 6,
        ('lymphNodes', 'Mammography'): 7,
        ('mass', 'US'): 8,
        ('calcificationsUs', 'US'): 12,
        ('lymphNodes', 'US'): 13,
        ('specialCases', 'US'): 14,
        ('mass', 'MRI'): 15,
        ('mriFeatures', 'MRI'): 18,
        ('kineticCurveAssessment', 'MRI'): 19,
        ('nonMassEnhancement(NME)', 'MRI'): 20,
        # NOTE(review): both MRI entries below used offset 22 in the original
        # ladder -- 'nonEnhancingFindings' may have been meant to differ.
        ('nonEnhancingFindings', 'MRI'): 22,
        ('lymphNodes', 'MRI'): 22,
        ('fatContainingLesions', 'MRI'): 24,
    }
    params = []
    key = (fname, modality)
    if key in row_offsets:
        offset = row_offsets[key]
        # Only the (mass, Mammography) branch tolerated missing rows.
        tolerant = key == ('mass', 'Mammography')
        for _ in range(len(arr)):
            try:
                params += grab_data(row_data, offset, 14, 19)
            except IndexError:
                if not tolerant:
                    raise
                continue
            row_data += 1
    fs = get_dic_from_two_lists(arr, params)
    return fs
def get_name(infile):
    """Return a uniformly random line (a person name) from text file *infile*."""
    with open(infile, 'r') as handle:
        names = handle.read().splitlines()
    return names[random.randrange(0, len(names))]
def get_numcond():
    """Number of distinct condition names in the spreadsheet's Name column."""
    return len(df.Name.unique())
def get_cond_name():
    """Pick a random condition name, skipping NaN (float) entries."""
    candidates = [name for name in df.Name.unique()
                  if not isinstance(name, float)]
    return random.choice(candidates)
def camelCase(st):
    """Convert *st* to camelCase, keeping only alphabetic characters.

    Example: 'lymph nodes' -> 'lymphNodes'.

    Bug fix: the original raised IndexError for input containing no
    letters (e.g. '' or '123'); such input now yields ''.
    """
    output = ''.join(ch for ch in st.title() if ch.isalpha())
    if not output:
        return ''
    return output[0].lower() + output[1:]
class AutoTree(dict):
    """dict that transparently creates nested AutoTree nodes on missing keys."""

    def __missing__(self, key):
        # Create, store and hand back a fresh subtree in one step.
        node = self[key] = type(self)()
        return node
def check_row(cond_name):
    """Return the 1-based row index of the first cell equal to *cond_name*.

    Scans every sheet of ``output.xlsx`` with xlrd.  Returns ``None``
    (implicit fall-through) when the name is not found.
    """
    from xlrd import open_workbook
    book = open_workbook("output.xlsx")
    for sheet in book.sheets():
        for rowidx in range(sheet.nrows):
            row = sheet.row(rowidx)
            for colidx, cell in enumerate(row):
                if cell.value == cond_name:
                    # xlrd rows are 0-based; callers expect 1-based.
                    return rowidx + 1
def get_birad(row, col, width):
    """Sample a BiRad category label for spreadsheet row *row*.

    Columns ``col .. col+width-1`` hold the weights of the BiRad categories
    whose names are the dataframe headers 2:9.
    NOTE(review): assumes ``width <= 7`` so ``val_head[i]`` stays in range.
    """
    val_head = list(df)[2:9]
    val_arr = []
    prob_arr = []
    for i in range(width):
        # row - 2 converts the 1-based spreadsheet row to a 0-based iloc.
        value = df.iloc[row - 2, col]
        val_arr += [val_head[i]]
        prob_arr += [value]
        col += 1
    # Weighted draw of a single category name.
    randp = (", ".join(random.choices(val_arr, prob_arr, k=1)))
    return randp
# Create random with parameter of report numbers
def generate_report(infile, items):
    """Generate *items* random patient reports and print them as JSON.

    *infile* is a text file of first names (one per line).  Only the
    'Mammography' modality is currently live; the US/MRI branches are kept
    below in a dead string literal (see NOTE near the end of the loop).
    """
    for c in range(items):
        filename = 'report' + str(c) + '.json'
        name = get_cond_name()
        row = check_row(name)
        # Read BiRads Probabilities into list
        # Read BiRads into list
        person_name = get_name(infile)
        p_id = random.randrange(100)
        p_age = random.randrange(25, 65)
        num_cond = random.randrange(1, 5)
        "create list of values and slice empty entities from list"
        rm = df['Relevant modalities'].values.tolist()[0:26]
        r = 'Mammography'
        #r = random.choice(rm)
        # mammo params
        report = {}
        report['patient_id'] = p_id
        report['patient_name'] = person_name
        report['relevantModality'] = r
        report['conditions_number'] = num_cond
        report['conditions'] = []
        if r == 'Mammography':
            # Deduplicate while preserving first-occurrence order.
            f_temp = df['Relevant findings'].values.tolist()[0:8]
            f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
            f_num_total = len(f_list)
            f_rand = random.randrange(1, f_num_total + 1)
            iter_params_mass = ['shape', 'margin', 'density']
            iter_params_calc = ['typicallyBenign', 'suspiciousMorphology', 'distribution']
            iter_params_a = ['assymetry']
            iter_params_lymph = ['lymphNodes']
            #print(f)
            for k in range(num_cond):
                #br = get_birad(row, 2, 7)
                cond = camelCase(get_cond_name())
                f_togo = [random.choice(f_list) for _ in range(f_rand)]
                f = camelCase(random.choice(f_togo))
                if f == 'mass':
                    rep_temp = create_rep(iter_params_mass, row, f, r)
                    report['conditions'] += [{'condition_name': cond,
                                              'condition_details': [
                                                  {'relevant_finding': [{'finding_name': f,
                                                                         'finding_parameters': rep_temp}
                                                                        ]}
                                              ]
                                              }]
                elif f == 'calcifications':
                    rep_temp = create_rep(iter_params_calc, row, f, r)
                    report['conditions'] += [{'condition_name': cond,
                                              'condition_details': [
                                                  {'relevant_finding': [{'finding_name': f,
                                                                         'finding_parameters': rep_temp}
                                                                        ]}
                                              ]
                                              }]
        json_t = json.dumps(report, indent=2)
        print(json_t)
        # NOTE(review): the triple-quoted block below is dead code (the old
        # assymetry/lymphNodes/US/MRI branches and the file-writing logic),
        # parked as a string literal.  It is evaluated and discarded on
        # every iteration; consider removing it or moving it to a comment.
        """"
        elif f == 'assymetry':
            rep_temp = create_rep(iter_params_a, row, f, r)
            findings[cond]['relevantFinding'] += [{f:rep_temp}]
            pass
        elif f == 'lymphNodes':
            rep_temp = create_rep(iter_params_lymph, row, f, r)
            findings[cond]['relevantFinding'] += [{f:rep_temp}]
            pass
        with open(filename, 'w') as f:
            json.dump(findings, f, indent = 4)
        elif r == 'US':
            f_temp = df['Relevant findings'].values.tolist()[8:15]
            f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
            f_num_total = len(f_list)
            f_rand = random.randrange(1, f_num_total + 1)
            us_params_mass = ['shape', 'margin', 'echo', 'posterior']
            us_params_calc = ['calcifications']
            us_params_l_nodes = ['lymphNodes']
            us_params_sp_cases = ['specialCases']
            for i in range(num_cond):
                br = get_birad(row, 2, 7)
                cond = camelCase(get_cond_name())
                findings[cond]['biRad'] = br
                findings[cond]['relevantFinding'] = []
                # f = 'mass'
                for k in range(f_rand + 1):
                    f = camelCase(random.choice(f_list))
                    if f == 'mass':
                        rep_temp = create_rep(us_params_mass, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'calcificationsUs':
                        rep_temp = create_rep(us_params_calc, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'lymphNodes':
                        rep_temp = create_rep(us_params_l_nodes, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    else:
                        rep_temp = create_rep(us_params_sp_cases, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
        with open(filename, 'w') as f:
            json.dump(findings, f, indent = 4)
        elif r == 'MRI':
            f_temp = df['Relevant findings'].values.tolist()[15:25]
            f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
            f_num_total = len(f_list)
            f_rand = random.randrange(1, f_num_total + 1)
            mri_params_mass = ['shape', 'margin', 'internalEnhancement']
            mri_params_mri_f = ['mriFeatures']
            mri_params_kin_c_a = ['kineticCurveAssessment']
            mri_params_nme = ['distribution', 'internalEnhancementPatterns']
            mri_params_nef = ['nonEnhancingPatterns']
            mri_params_l_nodes = ['lymphNodes']
            mri_params_fcl = ['fatContainingLesions']
            for i in range(num_cond):
                br = get_birad(row, 2, 7)
                cond = camelCase(get_cond_name())
                findings[cond]['biRad'] = br
                findings[cond]['relevantFinding'] = []
                # f = 'mass'
                for k in range(f_rand + 1):
                    f = camelCase(random.choice(f_list))
                    if f == 'mass':
                        rep_temp = create_rep(mri_params_mass, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'mriFeatures':
                        rep_temp = create_rep(mri_params_mri_f, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'kineticCurveAssessment':
                        rep_temp = create_rep(mri_params_kin_c_a, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'nonMassEnhancement(NME)':
                        rep_temp = create_rep(mri_params_nme, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'nonEnhancingFindings':
                        rep_temp = create_rep(mri_params_nef, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'lymphNodes':
                        rep_temp = create_rep(mri_params_l_nodes, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
                    elif f == 'fatContainingLesions':
                        rep_temp = create_rep(mri_params_fcl, row, f, r)
                        findings[cond]['relevantFinding'] += [{f: rep_temp}]
        with open(filename, 'w') as f:
            json.dump(findings, f, indent = 4)
        """
if __name__ == "__main__":
    # Generate a single report using the bundled list of first names.
    generate_report("first-names.txt", 1)
| mit |
jmargeta/scikit-learn | examples/svm/plot_iris.py | 4 | 1951 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on the iris dataset. It
will plot the decision surface for four different SVM classifiers.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h = .02  # step size in the mesh
# We create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors.
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel',
          'LinearSVC (linear kernel)']
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    pl.subplot(2, 2, i + 1)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
    pl.axis('off')
    # Plot also the training points
    pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
    pl.title(titles[i])
pl.show()
| bsd-3-clause |
dialounke/pylayers | pylayers/antprop/channel.py | 1 | 179616 | # -*t coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channel
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import pdb
import numpy as np
import numpy.ma as ma
import numpy.linalg as la
import scipy as sp
import scipy.signal as si
import pylab as plt
import struct as stru
import scipy.stats as st
import numpy.fft as fft
from scipy.io import loadmat
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
import pylayers.antprop.antenna as ant
from pylayers.util.project import *
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
# Optional dependency: HDF5 persistence is disabled when h5py is missing.
# NOTE(review): the bare except also hides unrelated errors raised during
# the import; `except ImportError` would be safer.
try:
    import h5py
except:
    print('h5py is not installed: Ctilde(object cannot be saved)')
class AFPchannel(bs.FUsignal):
    """ Angular Frequency Profile channel

    Attributes
    ----------
    x : np.array
        frequency axis (GHz), shape (Nf,)
    y : np.array
        complex amplitude, shape (Na, Nf)
    tx : np.array
        tx coordinates (,3)
    rx : np.array
        rx coordinates (,3)
    az : np.array (,Na)
        AFP azimuthal range in radians
    ang_offset : float
        azimuth offset (radians) w.r.t. the global frame
    """
    def __init__(self,x = np.array([]),
                 y = np.array([]),
                 tx = np.array([]),
                 rx = np.array([]),
                 az = np.array([])
                 ):
        bs.FUsignal.__init__(self,x=x,y=y,label='AFP')
        if len(self.x)!=0:
            # Center frequency taken as the middle frequency sample.
            # NOTE(review): Python-2 integer division is assumed here;
            # under Python 3 `len(self.x)/2` is a float index and fails.
            self.fcGHz = self.x[len(self.x)/2]
        self.tx = tx
        self.rx = rx
        self.ang_offset = 0
        self.refinement = False
        self.az = az
        self._filename = ''
        # NOTE(review): duplicate assignment, already set above.
        self.refinement = False
    def __repr__(self):
        # NOTE(review): `cv` (rad->deg factor) is computed but unused.
        cv = 180/np.pi
        s = 'Angular Frequency Profile object \n'
        s = s + 'Tx : '+str(self.tx)+'\n'
        s = s + 'Rx : '+str(self.rx)+'\n'
        return(s)
    def loadmes(self,_filename,_filecal,fcGHz=32.6,BW=1.6,win='rect',ang_offset=0.37,ext='txt',dirmeas='meas/Espoo',refinement=False):
        """ Load a measurement file and its back-to-back calibration.

        Measurement files and the associated back-to-back calibration files
        are placed in the measurement directory of the project.

        Parameters
        ----------
        _filename : string
            data file name (.txt or .mat)
        _filecal : string
            calibration matfile name (e.g. B2B.mat for Espoo)
        fcGHz : float
            center frequency
        BW : float
            measurement bandwidth (GHz)
        win : string
            apodisation window in ['rect','hamming','blackman']
        ang_offset : float
            azimuth offset angle in radians
        ext : string
            file extension 'txt' | 'mat'
        dirmeas : string
            measurement directory (relative to the project)
        refinement : boolean
            forwarded flag for later peak refinement

        Notes
        -----
        Updates self.x (frequency GHz), self.y (calibrated transfer
        function) and self.az (azimuth in radians).

        See Also
        --------
        pylayers.util.pyutil.getlong
        """
        self._filename = _filename
        self.BW = BW
        self.fcGHz = fcGHz
        self.fmin = fcGHz-BW/2.
        self.fmax = fcGHz+BW/2.
        self.win = win
        self.refinement = refinement
        # Read the calibration matfile located in the measurement directory.
        filecal = pyu.getlong(_filecal,dirmeas)
        U = loadmat(filecal)
        cal_trf = U['cal_trf'][:,0]
        # Read the measurement file (.txt or .mat layout differs below).
        filename = pyu.getlong(_filename,dirmeas)
        if ext=='txt':
            D = np.loadtxt(filename,skiprows=2)  # amplitude/phase interleaved columns
            amp = D[:,2::2]
            ang = D[:,3::2]
        else:
            D = loadmat(filename)
            amp = D['amp']
            ang = D['ang']
            rotationangle = D['rotationangle'].squeeze()
        #
        # Transfer function reconstruction
        #
        self.Na = amp.shape[0]
        self.Nf = amp.shape[1]
        #
        # select apodisation window
        #
        if win=='hamming':
            window = np.hamming(self.Nf)
        elif win=='blackman':
            window = np.blackman(self.Nf)
        else:
            window = np.ones(self.Nf)
        #
        # complex transfer function (calibrated and windowed)
        #
        self.x = np.linspace(self.fmin,self.fmax,self.Nf)
        # NOTE(review): Python-2 integer division assumed (see __init__).
        self.fcGHz = self.x[len(self.x)/2]
        self.y = amp*np.exp(1j*ang*np.pi/180.)*cal_trf[None,:]*window
        if self.fcGHz==83.:
            fGHz = np.linspace(self.fmin,self.fmax,self.Nf)
            t0 = 2 # delay the signal by 2 ns
            self.y = amp*np.exp(1j*ang*np.pi/180.)*np.exp(-2j*np.pi*fGHz*t0)*cal_trf[None,:]*window
        #
        # A txt extension means the file comes from the ESPOO measurement
        # campaign; azimuth conventions differ between the two formats.
        #
        # self.az : 5.86 -> 1.94
        if ext=='txt':
            self.azmes = (360-D[:,0])*np.pi/180.
            self.az = self.azmes + ang_offset - 2*np.pi
            # Wrap negative azimuths back into [0, 2*pi).
            u = np.where(self.az<0)
            self.az[u] = self.az[u] + 2*np.pi
        else:
            self.azmes = rotationangle*np.pi/180.
            self.az = ang_offset - self.azmes
            u = np.where(self.az<0)
            self.az[u] = self.az[u] + 2*np.pi
    def toadp(self,imax=-1,win='rect'):
        """ Convert the AFP into an ADP (frequency -> delay domain).

        Parameters
        ----------
        imax : int
            keep only the first imax delay samples (-1 keeps everything)
        win : string
            'han' applies a Hanning window before the inverse FFT

        Returns
        -------
        ADPchannel
        """
        # x : delay axis in ns (starting at 0); resolution 1/BW.
        x = np.linspace(0,(len(self.x)-1)/(self.x[-1]-self.x[0]),len(self.x))
        # y : inverse FFT along the frequency axis (axis 1).
        if win=='han':
            y = np.fft.ifft(self.y*np.hanning(len(self.x))[None,:],axis=1)
        else:
            y = np.fft.ifft(self.y,axis=1)
        if imax!=-1:
            y = y[:,0:imax]
            x = x[0:imax]
        adp = ADPchannel(x=x,
                         y=y,
                         az=self.az,
                         tx=self.tx,
                         rx=self.rx,
                         fcGHz=self.fcGHz,
                         _filename=self._filename,
                         refinement=self.refinement,
                         ang_offset = self.ang_offset)
        return adp
class ADPchannel(bs.TUsignal):
""" Angular Delay Profile channel
Attributes
----------
az : array
azimuth in radian
ang_offset :
theta : float
phi : float
tau : float
_filename : string
short filename for saving
"""
    def __init__(self,
                 x=np.array([]),
                 y=np.array([]),
                 az=np.array([]),
                 tx=np.array([]),
                 rx=np.array([]),
                 fcGHz=28,
                 _filename='',
                 refinement=False,
                 ang_offset = 0,
                 ):
        """
        Parameters
        ----------
        x : np.array
            delay axis (ns)
        y : np.array
            angle x delay complex profile
        az : np.array
            azimuth angles (radians)
        tx : np.array
            tx coordinates
        rx : np.array
            rx coordinates
        fcGHz : float
            center frequency (GHz)
        _filename : string
            short filename used for saving
        refinement : boolean
            refine the azimuth estimate of the peak
        ang_offset : float
            azimuth offset (radians)
        """
        bs.TUsignal.__init__(self,x=x,y=y,label='ADP')
        self.az = az
        self.tx = tx
        self.rx = rx
        self._filename = _filename
        self.fcGHz = fcGHz
        self.ang_offset = ang_offset
        # Geometric line-of-sight parameters from the tx/rx positions.
        v = self.tx - self.rx
        distLOS = np.linalg.norm(v)
        # Delay in ns (0.3 m/ns is the speed of light).
        self.taulos_geo = distLOS/0.3
        self.anglos_geo = np.arctan2(v[1],v[0])*180/np.pi
        # Free-space path loss (dB), converted to linear power below.
        LFS = -(32.4+20*np.log10(fcGHz)+20*np.log10(distLOS))
        self.alphalos_geo = 10**(LFS/10.)
        if self.anglos_geo<0:
            # NOTE(review): anglos_geo is in degrees here but is wrapped
            # by adding 2*pi (radians) -- looks like a unit mismatch.
            self.anglos_geo = 2*np.pi+self.anglos_geo
        # Estimated line-of-sight parameters from the strongest PADP peak.
        alphapeak,taupeak,angpeak = self.peak(refinement=refinement)
        self.angpeak_est = angpeak*180/np.pi
        self.taupeak_est = taupeak
        self.alphapeak_est = alphapeak
        # NOTE(review): duplicate assignment, already set above.
        self._filename = _filename
def __repr__(self):
    """ Summary : angular offset, azimuth span and a comparison of
    geometric vs estimated LOS parameters. """
    cv = 180/np.pi
    gdB = 10*np.log10(self.alphapeak_est**2/self.alphalos_geo)
    parts = [
        'Angular Delay Profile object \n',
        'Angular offset (degrees) : ' + str(cv*self.ang_offset) + '\n',
        'agmin : ' + str(self.az[0]*cv) + ' agmax : ' + str(self.az[-1]*cv) + '\n',
        'alpha (geo): ' + str(self.alphalos_geo) + ' (est): ' + str(self.alphapeak_est**2) + ' GdB : ' + str(gdB) + ' \n',
        'tau (geo): ' + str(self.taulos_geo) + ' (est): ' + str(self.taupeak_est) + ' \n',
        'ang (geo): ' + str(self.anglos_geo) + ' (est): ' + str(self.angpeak_est) + '\n',
    ]
    return ''.join(parts)
def peak(self,refinement=False):
    """ evaluate peak of PADP

    Parameters
    ----------
    refinement : boolean
        provide a refined version of angular estimation

    Returns
    -------
    alphapeak : float
        magnitude of the strongest PADP bin
    taupeak : float
        delay of the strongest bin (first one if several maxima)
    phipeak :
        azimuth (radians) of the strongest bin; a scalar when
        refinement is True, otherwise a 1-element array
    """
    alphapeak = np.max(np.abs(self.y))
    # (angle, delay) indices of the global maximum; both are arrays
    # (possibly with more than one entry if the max is not unique)
    iphi,itau = np.where(np.abs(self.y)==alphapeak)
    taupeak = self.x[itau][0]
    if refinement:
        # center of gravity of the 3 angular bins around the peak
        # NOTE(review): slicing with the 1-element arrays iphi-1/iphi+2
        # relies on deprecated numpy behavior and assumes the peak is
        # not on the first/last angular bin -- TODO confirm
        pr = np.abs(self.y)[iphi-1:iphi+2,itau].squeeze()
        azr = self.az[iphi-1:iphi+2]
        Id = np.sum(pr)
        In = np.sum(pr*azr)
        phipeak = In/Id
    else:
        phipeak = self.az[iphi]
    return alphapeak,taupeak,phipeak
def cut(self, imin=0, imax=1000):
    """ Restrict the profile to the delay bins [imin:imax], in place. """
    self.x = self.x[imin:imax]
    self.y = self.y[:, imin:imax]
def correlate(self,adp,thresholddB=-105):
    """ correlate ADP with an other ADP

    Parameters
    ----------
    adp : ADPchannel
    thresholddB : float
        bins whose magnitude is at or below this level (in dB)
        are zeroed before correlating

    Returns
    -------
    rhoE : energy ratio of padp Eadp/Eself
    rhoEc : energy ratio of centered padp Ecadp/Ecself
    rho : normalized intercorrelation : <self-mean(self),adp-mean(adp)>/Eself
    rhoc : normalized intercorrelation : <self-mean(self),adp-mean(adp)>/Ecself
    rhon : intercorrelation of normalized padp <self_normalized,adp_normalized>

    Notes
    -----
    This can be used to compare a measured PADP with a Ray tracing PADP
    """
    #
    # apply the min dB level thresholding
    #
    tmp_self = np.abs(self.y)
    tmp_adp = np.abs(adp.y)
    u1 = np.where(20*np.log10(tmp_self)>thresholddB)
    u2 = np.where(20*np.log10(tmp_adp)>thresholddB)
    padp_self = np.zeros(tmp_self.shape)
    padp_adp = np.zeros(tmp_adp.shape)
    padp_self[u1] = tmp_self[u1]
    padp_adp[u2] = tmp_adp[u2]
    # centered (zero-mean) versions of both profiles
    padpc_self = padp_self-np.mean(padp_self)
    padpc_adp = padp_adp-np.mean(padp_adp)
    # energies taken as the peaks of the 2D auto-correlations
    # NOTE(review): si is presumably scipy.signal -- confirm at file top
    Eself = np.max(si.correlate2d(padp_self,padp_self,mode='same'))
    Ecself = np.max(si.correlate2d(padpc_self,padpc_self,mode='same'))
    Eadp = np.max(si.correlate2d(padp_adp,padp_adp,mode='same'))
    Ecadp = np.max(si.correlate2d(padpc_adp,padpc_adp,mode='same'))
    # alternative (cheaper) energy definitions, kept for reference:
    #Eself = np.sum(padp_self*padp_self)
    #Ecself = np.sum(padpc_self*padpc_self)
    #Eadp = np.sum(padp_adp*padp_adp)
    #Ecadp = np.sum(padpc_adp*padpc_adp)
    # unit-energy versions of the centered profiles
    padpcn_self = padpc_self/np.sqrt(Ecself)
    padpcn_adp = padpc_adp/np.sqrt(Ecadp)
    rhoE = Eadp/Eself
    rhoEc = Ecadp/Ecself
    #rho = np.sum(padpc_self*padpc_adp)/Eself
    #rhoc = np.sum(padpc_self*padpc_adp)/Ecself
    #rhon = np.sum(padpcn_self*padpcn_adp)
    # cross-correlation peaks normalized by the different energy terms
    rho = np.max(si.correlate2d(padpc_self,padpc_adp,mode='same'))/Eself
    rhoc = np.max(si.correlate2d(padpc_self,padpc_adp,mode='same'))/Ecself
    rhon = np.max(si.correlate2d(padpcn_self,padpcn_adp,mode='same'))
    return rhoE,rhoEc,rho,rhoc,rhon
def svd(self):
    """ Singular value decomposition of the PADP.

    Fills self.d : dict mapping k to
        {'sv': k-th singular value,
         'b' : rank-one component sv * u_k v_k^T}
    """
    U, S, V = la.svd(self.y)
    self.d = {k: {'sv': sv, 'b': sv * np.outer(U[:, k], V[k, :])}
              for k, sv in enumerate(S)}
def imshow(self,**kwargs):
    """ show Angular Delay Profile as an image (angle on x, delay on y)

    Parameters
    ----------
    imin, imax : int
        delay bin range to display
    dB : boolean
        display 20*log10 of the magnitude
    fonts : int
        font size for labels and ticks
    colorbar : boolean
        add a colorbar
    fig, ax :
        existing matplotlib figure/axes ([] : create new ones)
    label : str
        colorbar label prefix
    ang_offset, orientation :
        affine mapping applied to the estimated/geometric LOS angles
        before overlaying their markers (degrees)
    blos : boolean
        overlay the estimated (star) and geometric (diamond) LOS points
    Remaining keyword arguments (origin, vmin, vmax, alpha, ...) are
    forwarded to matplotlib imshow.

    Returns
    -------
    fig, ax
    """
    defaults = {'origin':'lower',
                'vmax' : -65,
                'vmin' : -120,
                'interpolation' : 'nearest',
                'aspect' : 'auto',
                'imin':0,
                'imax':-1,
                'dB' : True,
                'fonts':18,
                'colorbar':False,
                'fig' :[],
                'ax' : [],
                'label':'',
                'ang_offset':450,
                'orientation':-1,
                'alpha':1,
                'blos':True
               }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    # pop the local options; whatever remains in kwargs is passed
    # straight to ax.imshow
    imin = kwargs.pop('imin')
    imax = kwargs.pop('imax')
    dB = kwargs.pop('dB')
    fig = kwargs.pop('fig')
    ax = kwargs.pop('ax')
    fonts = kwargs.pop('fonts')
    label = kwargs.pop('label')
    blos = kwargs.pop('blos')
    orientation = kwargs.pop('orientation')
    interpolation = kwargs.pop('interpolation')
    aspect = kwargs.pop('aspect')
    bcolorbar = kwargs.pop('colorbar')
    ang_offset = kwargs.pop('ang_offset')
    if fig==[]:
        fig = plt.figure()
    if ax==[]:
        ax = fig.add_subplot(111)
    rd2deg = 180/np.pi
    #extent = (self.az[-1]*rd2deg+agoffset,
    #          self.az[0]*rd2deg+agoffset,
    #          self.x[imin],self.x[imax])
    #extent = (self.az[0]*rd2deg,
    #          self.az[-1]*rd2deg,
    #          self.x[imin],self.x[imax])
    # x axis is hard-coded to the full 0..360 degree span
    extent = (0,360,self.x[imin],self.x[imax])
    # transpose so delay runs along the vertical axis
    padp = np.abs(self.y)[:,imin:imax].T
    if dB:
        padp = 20*np.log10(padp)
    im = ax.imshow(padp,extent=extent,interpolation=interpolation,aspect=aspect,**kwargs)
    plt.axis('equal')
    if blos:
        # overlay estimated (a1) and geometric (a2) LOS markers
        a1 = ang_offset + orientation*self.angpeak_est
        a2 = ang_offset + orientation*self.anglos_geo
        ax.scatter(a1,self.taupeak_est,marker='*',s=70,color='k')
        ax.scatter(a2,self.taulos_geo,marker='D',s=70,color='k')
    if bcolorbar:
        cbar = plt.colorbar(im)
        if dB:
            cbar.set_label(label+' dB',fontsize=fonts)
        else:
            cbar.set_label(label+' linear',fontsize=fonts)
        for t in cbar.ax.get_yticklabels():
            t.set_fontsize(fonts)
    ax.set_ylabel('Propagation delay [ns]',fontsize=fonts)
    ax.set_xlabel('Angle[deg]',fontsize=fonts)
    # NOTE(review): only y ticks get the font size here, x ticks keep
    # the default -- possibly unintentional
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(fonts)
    return fig,ax
# def imshow_mamadou(self,**kwargs):
# """ show Angular Delay Profile
# """
# defaults = {'origin':'lower',
# 'vmax' : -65,
# 'vmin' : -120,
# 'interpolation' : 'nearest',
# 'imin':0,
# 'imax':-1,
# 'dB' : True,
# 'fontsize':18,
# 'fig' :[],
# 'ax' : [],
# 'label':'',
# 'ang_offset':450,
# 'orientation':-1,
# 'alpha':1,
# 'blos':True
# }
# for k in defaults:
# if k not in kwargs:
# kwargs[k] = defaults[k]
# imin = kwargs.pop('imin')
# imax = kwargs.pop('imax')
# dB = kwargs.pop('dB')
# fig = kwargs.pop('fig')
# ax = kwargs.pop('ax')
# fontsize = kwargs.pop('fontsize')
# label = kwargs.pop('label')
# blos = kwargs.pop('blos')
# orientation = kwargs.pop('orientation')
# ang_offset = kwargs.pop('ang_offset')
# if fig==[]:
# fig = plt.figure()
# if ax==[]:
# ax = fig.add_subplot(111)
# rd2deg = 180/np.pi
# #extent = (self.az[-1]*rd2deg+agoffset,
# # self.az[0]*rd2deg+agoffset,
# # self.x[imin],self.x[imax])
# #extent = (self.az[0]*rd2deg,
# # self.az[-1]*rd2deg,
# # self.x[imin],self.x[imax])
# #extent = (0,360,self.x[imin],self.x[imax])
# # import ipdb
# # ipdb.set_trace()
# # extent = (0,360,self.x[imin],self.x[imax])
# extent = (45,270,self.x[imin],self.x[imax])
# padp = np.abs(self.y)[:,imin:imax].T
# if dB:
# padp = 20*np.log10(padp)
# im = ax.imshow(padp,extent=extent,**kwargs)
# if blos:
# a1 = ang_offset + orientation*self.angpeak_est
# a2 = ang_offset + orientation*self.anglos_geo
# ax.scatter(a1,self.taupeak_est,marker='*',s=70,color='k')
# ax.scatter(a2,self.taulos_geo,marker='D',s=70,color='k')
# plt.axis('auto')
# cbar = plt.colorbar(im)
# if dB:
# cbar.set_label(label+' dB',fontsize=fontsize)
# else:
# cbar.set_label(label+' linear',fontsize=fontsize)
# for t in cbar.ax.get_yticklabels():
# t.set_fontsize(fontsize)
# plt.ylabel('Propagation delay [ns]',fontsize=fontsize)
# plt.xlabel('Angle[deg]',fontsize=fontsize)
# #ax.title('PADP',fontsize=fonts)
# plt.xticks(fontsize=fontsize)
# plt.yticks(fontsize=fontsize)
# =======
# if bcolorbar:
# cbar = plt.colorbar(im)
# if dB:
# cbar.set_label(label+' dB',fontsize=fonts)
# else:
# cbar.set_label(label+' linear',fontsize=fonts)
# for t in cbar.ax.get_yticklabels():
# t.set_fontsize(fonts)
# ax.set_ylabel('Propagation delay [ns]',fontsize=fonts)
# ax.set_xlabel('Angle[deg]',fontsize=fonts)
# #ax.title('PADP',fontsize=fonts)
# for tick in ax.xaxis.get_major_ticks():
# tick.label.set_fontsize(fonts)
# for tick in ax.yaxis.get_major_ticks():
# tick.label.set_fontsize(fonts)
# >>>>>>> af9c9afcb43c82b16b8147df1b4de05c4e8766c7
# return fig,ax
def clean(self, threshold_dB=20):
    """ Zero out weak PADP bins, in place.

    Parameters
    ----------
    threshold_dB : float

    Notes
    -----
    Every bin whose power lies more than threshold_dB below the
    maximum power is set to 0+0j.
    """
    power = np.real(self.y * np.conj(self.y))
    power_dB = 10 * np.log10(power)
    # log10 is monotonic, so max of dB values equals dB of max power
    floor_dB = power_dB.max() - threshold_dB
    weak = np.where(power_dB < floor_dB)
    self.y[weak] = 0 + 0j
def pap(self,
        fcGHz=28,
        fontsize=18,
        figsize=(10,10),
        Gmax=22.68,
        Gmin=19,
        threshdB=-95,
        label='',
        color='k',
        fig=[],
        ax=[],
        xlabel=True,
        ylabel=True,
        legend=True):
    """ Calculate Power Angular Profile

    Parameters
    ----------
    fcGHz : float
    fontsize : int
    figsize : tuple
    Gmax, Gmin : float
        antenna gain extrema (dB); the mean of both is subtracted
        from the displayed curve
    threshdB : float
        only delay bins whose PDP level exceeds this threshold (dB)
        contribute to the angular integration
    label, color : plot styling
    fig, ax : existing matplotlib figure/axes ([] : create new ones)
    xlabel : boolean
    ylabel : boolean
    legend : boolean

    Returns
    -------
    fig, ax, adp
    """
    Na = self.y.shape[0]
    # integration over frequency
    # adp (angle)
    Gtyp = (Gmax+Gmin)/2.
    Py = np.real(self.y*np.conj(self.y))
    # pdp over delay, used only to select significant delay bins
    pdp0 = np.sum(Py,axis=0)
    pdp0dB = 10*np.log10(pdp0)
    u = pdp0dB>threshdB
    # angular profile : power summed over the selected delay bins
    adp = np.sum(Py[:,u],axis=1)
    #mPya = np.median(Py,axis=0)
    #mPya = np.mean(Py,axis=0)
    #sPy = Py-mPya[None,:]
    #adp = np.sum(Pyz,axis=1)
    u = np.where(adp==max(adp))[0]
    if fig==[]:
        fig = plt.figure(figsize=figsize)
    else:
        fig = fig
    if ax == []:
        ax = fig.add_subplot(111)
    else:
        ax = ax
    #ax.plot(self.az*180/np.pi,10*np.log10(adp),color='r',label=r'$10\log_{10}(\sum_{\tau} PADP(\phi,\tau))$',linewidth=1.5)
    # NOTE(review): the x axis is a hard-coded 45..260 degree span,
    # not self.az -- presumably measurement specific; confirm
    ag = np.linspace(45,260,len(adp))
    ax.plot(ag, #360self.az*180/np.pi,
            10*np.log10(adp)-Gtyp,
            color=color,
            label=label,
            linewidth=1.5)
    #ax.vlines(self.tau,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
    ax.hlines(-120,xmin=45,xmax=260,linestyles='dashed',color='black')
    #ax.set_ylim(-80,-60)
    if xlabel:
        ax.set_xlabel('Angle [deg]',fontsize=fontsize)
    if ylabel:
        ax.set_ylabel('level (dB)',fontsize=fontsize)
    #ax.set_title(self._filename,fontsize=fontsize)
    if legend:
        plt.legend(loc='best')
    return fig,ax,adp
def pap_mamadou(self,fcGHz=28,fontsize=25,color='r',figsize=(10,10),fig=[],ax=[],label='',xlabel=True,ylabel=True,legend=True):
    """ Calculate Power Angular Profile (variant)

    The profile is averaged over delay and displayed normalized to
    its own maximum (0 dB at the strongest azimuth).

    Parameters
    ----------
    fcGHz : float
    fontsize : int
    color : str
    figsize : tuple
    fig, ax : existing matplotlib figure/axes ([] : create new ones)
    label : str
    xlabel, ylabel, legend : boolean

    Returns
    -------
    fig, ax, pap
    """
    # mean power over the delay axis
    # NOTE(review): y*conj(y) keeps a complex dtype (zero imaginary
    # part); downstream np.abs handles it -- confirm intended
    pap = np.mean(self.y*np.conj(self.y),axis=1)
    if fig==[]:
        fig = plt.figure(figsize=figsize)
    else:
        fig = fig
    if ax == []:
        ax = fig.add_subplot(111)
    else:
        ax = ax
    # hard-coded 73-point 0..360 degree axis; assumes 5 degree
    # angular sampling -- TODO confirm against self.az
    az = np.linspace(0,360,73,dtype=int)
    #az = np.linspace(45,270,46,dtype=int)
    ax.plot(az,10*np.log10(np.abs(pap))-10*np.log10(np.abs(pap)).max(),label=label,color=color,linewidth=1.5)
    #ax.plot(self.az*180/np.pi,10*np.log10(adp),color='r',label=r'$10\log_{10}(\sum_{\tau} PADP(\phi,\tau))$',linewidth=1.5)
    #ax.vlines(self.tau,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
    ll = ax.get_xticklabels()+ax.get_yticklabels()
    for l in ll:
        l.set_fontsize(fontsize)
    if xlabel:
        ax.set_xlabel('Angle [degrees]',fontsize=fontsize)
    if ylabel:
        ax.set_ylabel('Normalized level [dB]',fontsize=fontsize)
    ax.grid('on')
    ax.set_title(self._filename)
    if legend:
        plt.legend(loc='best',fontsize=fontsize)
    return fig,ax,pap
def app(self,**kwargs):
    """ Calculate Angular Power Profile

    Returns
    -------
    app : np.array
        power integrated over the delay axis for each azimuth bin
        (length = number of angular steps)
    """
    # power summed over delay for each angle
    # (bug fix : the computed profile was previously discarded --
    # the method returned None)
    app = np.real(np.sum(self.y*np.conj(self.y),axis=1))
    return app
def pltcir(self,phideg,Gain=21):
    """ Plot the channel impulse response at a given azimuth.

    Parameters
    ----------
    phideg : float
        azimuth (degrees); the closest available azimuth bin is used
    Gain : float
        antenna gain (dB) subtracted from the displayed level

    Returns
    -------
    fig, ax, u
        u is the index of the selected azimuth bin
    """
    phi = phideg*np.pi/180.
    # index of the azimuth bin closest to the requested angle
    dang = np.abs(self.az - phi)
    u = np.where(dang==np.min(dang))[0][0]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # free space path loss reference curve (delay converted to meters
    # with 0.3 m/ns)
    FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
    plt.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gain)
    plt.semilogx(self.x,FS,'k',linewidth=2)
    plt.show()
    return fig,ax,u
def pdp_v(self,**kwargs):
    """ Calculate and plot Power Delay Profile (vertical layout)

    Axes are swapped with respect to pdp(): delay runs along the
    vertical axis, level (dB) along the horizontal axis.

    Parameters
    ----------
    Gmax, Gmin : float
        antenna gain extrema (dB); their mean is the 'typic' gain
    threshdB : float
        PERCENTILE (not dB) of the angular profile used to select
        the angular bins that contribute to the PDP
    imax : int
        last delay bin displayed (-1 : all)
    Remaining keyword arguments control plot styling; see `defaults`.

    Returns
    -------
    fig, ax
    """
    defaults = { 'figsize':(10,10),
                 'fontsize':18,
                 'fig' : [],
                 'ax': [],
                 'xlabel': True,
                 'ylabel': True,
                 'legend': True,
                 'losdelay': True,
                 'freespace': True,
                 'desembeded': False,
                 'noisefloor': False,
                 'typic':True,
                 'semilogx':True,
                 'bcir':False,
                 'raw': False,
                 'Gmax':22.68,
                 'Gmin':19,
                 'threshdB':75,
                 'imax':-1,
                 'Tilt':10,
                 'HPBW':10,
                 'dphi':5,
                 'marker':'*',
                 'color':'k',
                 'label':'',
                 'linewidth':1
               }
    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]
    Gmax = kwargs.pop('Gmax')
    Gmin = kwargs.pop('Gmin')
    imax = kwargs.pop('imax')
    threshdB = kwargs.pop('threshdB')
    Gtyp = (Gmax+Gmin)/2.
    # get peak value of the PADP
    alpha,tau,phi = self.peak()
    Na = self.y.shape[0]
    # pdp : power delay profile
    Py = np.real(self.y*np.conj(self.y))
    # angular profile used to keep only the strongest angular bins
    pap0 = np.sum(Py,axis=1)
    pap0dB = 10*np.log10(pap0)
    # keep angular bins above the threshdB-th percentile
    u = pap0dB>np.percentile(pap0dB,threshdB)
    pdp = np.sum(Py[u,:],axis=0)
    pdp = pdp[0:imax]
    x = self.x[0:imax]
    # spdp : square root of power delay profile
    spdp = TUchannel(x=x,y=np.sqrt(pdp))
    u = np.where(pdp==max(pdp))[0]
    # free space path loss reference (delay -> meters via 0.3 m/ns)
    FS = -(32.4+20*np.log10(x*0.3)+20*np.log10(self.fcGHz))
    AttmaxdB = 20*np.log10(alpha)
    #Gmax = AttmaxdB-FS[u]
    #Gmax_r = np.round(Gmax[0]*100)/100.
    #
    # The -3dB is specific to the Aalto measurement and desembeding (1/2)
    #
    # de-embedded curves : remove antenna gain (min/max/typical)
    pdp_min = 10*np.log10(pdp)-Gmax-1
    pdp_max = 10*np.log10(pdp)-Gmin-1
    pdp_typ = 10*np.log10(pdp)-Gtyp-1
    # bins where the typical curve exceeds free space
    uflashing = np.where(pdp_typ>FS)
    # -118 dB acts as the noise floor for the path loss integration
    umin = np.where(pdp_min>-118)
    pdp_min_thr = pdp_min[umin]
    umax = np.where(pdp_max>-118)
    pdp_max_thr = pdp_max[umax]
    PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
    if kwargs['fig']==[]:
        fig = plt.figure(figsize=kwargs['figsize'])
    else:
        fig = kwargs['fig']
    if kwargs['ax'] == []:
        ax = fig.add_subplot(111)
    else:
        ax = kwargs['ax']
    if kwargs['semilogx']:
        # log delay axis (vertical) via semilogy with swapped arguments
        if kwargs['raw']:
            ax.semilogy(10*np.log10(pdp),x,color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
            #ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
        if kwargs['desembeded']:
            ax.semilogy(pdp_min,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
            ax.semilogy(pdp_max,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
        if kwargs['typic']:
            ax.semilogy(pdp_typ,x,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
            # highlight the bins that exceed free space in red
            ax.semilogy(pdp_typ[uflashing],x[uflashing],label=kwargs['label'],color='red',linewidth=kwargs['linewidth'])
        if kwargs['freespace']:
            if kwargs['typic']:
                ax.semilogy(FS,x,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
            else:
                ax.semilogy(FS,x,color='k',linewidth=2,label='Free Space path profile')
        if kwargs['losdelay']:
            ax.hlines(self.taupeak_est,xmin=-130,xmax=-40,linestyles='dashed',color='blue')
            ax.hlines(self.taulos_geo,xmin=-130,xmax=-40,linestyles='dashed',color='red')
        if kwargs['noisefloor']:
            ax.vlines(-130,ymin=0,ymax=x[-1],linestyles='dashed',color='black')
        #ax.set_xlim(10,1000)
        # NOTE(review): axes are swapped, so the 'xlabel' flag guards
        # the delay label on the Y axis -- confusing but consistent
        if kwargs['xlabel']:
            ax.set_ylabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
        if kwargs['bcir']:
            # overlay the CIR at the estimated LOS azimuth
            phi = self.angpeak_est*np.pi/180.
            dang = np.abs(self.az - phi)
            u = np.where(dang==np.min(dang))[0][0]
            ax.semilogx(20*np.log10(np.abs(self.y[u,:]))-Gmax,x,color='r')
            ax.semilogx(20*np.log10(np.abs(self.y[u,:]))-Gmin,x,color='g')
    else:
        if kwargs['raw']:
            ax.plot(10*np.log10(pdp),x,color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
            ax.plot(np.array([AttmaxdB]),np.array([tau]),color='k')
        if kwargs['desembeded']:
            ax.plot(pdp_min,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
            ax.plot(pdp_max,x,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
        if kwargs['typic']:
            ax.plot(pdp_typ,x,label=kwargs['label'],color=kwargs['color'])
            ax.scatter(pdp_typ[uflashing],x[uflashing],s=80,c='red')
        if kwargs['freespace']:
            if kwargs['typic']:
                ax.plot(FS,x,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
                ax.plot(FS-(Gmax-Gmin)/2,x,color='blue',linewidth=0.5,label='Free Space path profile')
                ax.plot(FS+(Gmax-Gmin)/2,x,color='blue',linewidth=0.5,label='Free Space path profile')
            else:
                ax.plot(FS,x,color='k',linewidth=2,label='Free Space path profile')
        if kwargs['losdelay']:
            ax.hlines(self.taupeak_est,xmin=-130,xmax=-40,linestyles='dashed',color='blue')
            ax.hlines(self.taulos_geo,xmin=-130,xmax=-40,linestyles='dashed',color='red')
        if kwargs['noisefloor']:
            ax.vlines(-130,ymin=0,ymax=x[-1],linestyles='dashed',color='red')
        #ax.set_xlim(0,1000)
        if kwargs['xlabel']:
            ax.set_ylabel('Delay (ns)',fontsize=kwargs['fontsize'])
        if kwargs['bcir']:
            phi = self.angpeak_est*np.pi/180.
            dang = np.abs(self.az - phi)
            u = np.where(dang==np.min(dang))[0][0]
            ax.plot(20*np.log10(np.abs(self.y[u,:]))-Gmax,x,'r')
            ax.plot(20*np.log10(np.abs(self.y[u,:]))-Gmin,x,'g')
    if kwargs['ylabel']:
        ax.set_xlabel('level (dB)',fontsize=kwargs['fontsize'])
    #ax.set_title(self._filename+' '+str(PL))
    if kwargs['legend']:
        plt.legend(loc='best')
    ax.set_ylim(0,x[-1])
    return fig,ax
def pdp_first(self,**kwargs):
    """ Calculate and plot Power Delay Profile (first version)

    The PDP is obtained by summing the PADP power over all angles.

    Parameters
    ----------
    Gmax, Gmin : float
        antenna gain extrema (dB); their mean is the 'typic' gain
    Remaining keyword arguments control plot styling; see `defaults`.

    Returns
    -------
    fig, ax, pdp
    """
    defaults = { 'figsize':(10,10),
                 'fontsize':18,
                 'fig' : [],
                 'ax': [],
                 'xlabel': True,
                 'ylabel': True,
                 'legend': True,
                 'losdelay': True,
                 'freespace': True,
                 'desembeded': False,
                 'typic':True,
                 'semilogx':True,
                 'bcir':False,
                 'raw': False,
                 'Gmax':22.68,
                 'Gmin':19,
                 'Tilt':10,
                 'HPBW':10,
                 'dphi':5,
                 'marker':'*',
                 'color':'k',
                 'label':'',
                 'linewidth':1
               }
    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]
    Gmax = kwargs.pop('Gmax')
    Gmin = kwargs.pop('Gmin')
    Gtyp = (Gmax+Gmin)/2.
    # get peak value of the PADP
    alpha,tau,phi = self.peak()
    Na = self.y.shape[0]
    # pdp : power summed over all angular bins
    pdp = np.real(np.sum(self.y*np.conj(self.y),axis=0))
    # spdp : square root of power delay profile
    spdp = TUchannel(x=self.x,y=np.sqrt(pdp))
    u = np.where(pdp==max(pdp))[0]
    # free space path loss reference (delay -> meters via 0.3 m/ns)
    FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
    AttmaxdB = 20*np.log10(alpha)
    #Gmax = AttmaxdB-FS[u]
    #Gmax_r = np.round(Gmax[0]*100)/100.
    #
    # The -3dB is specific to the Aalto measurement and desembeding (1/2)
    #
    # de-embedded curves : remove antenna gain (min/max/typical)
    pdp_min = 10*np.log10(pdp)-Gmax-1
    pdp_max = 10*np.log10(pdp)-Gmin-1
    pdp_typ = 10*np.log10(pdp)-Gtyp-1
    # -118 dB acts as the noise floor for the path loss integration
    umin = np.where(pdp_min>-118)
    pdp_min_thr = pdp_min[umin]
    umax = np.where(pdp_max>-118)
    pdp_max_thr = pdp_max[umax]
    PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
    if kwargs['fig']==[]:
        fig = plt.figure(figsize=kwargs['figsize'])
    else:
        fig = kwargs['fig']
    if kwargs['ax'] == []:
        ax = fig.add_subplot(111)
    else:
        ax = kwargs['ax']
    if kwargs['semilogx']:
        if kwargs['raw']:
            ax.semilogx(self.x,10*np.log10(pdp),color='r',marker=kwargs['marker'],label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
            #ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
        if kwargs['desembeded']:
            ax.semilogx(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
            ax.semilogx(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
        if kwargs['typic']:
            ax.semilogx(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
        if kwargs['freespace']:
            if kwargs['typic']:
                ax.semilogx(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+3,label='FSPL @ '+str(self.fcGHz)+' GHz')
            else:
                ax.semilogx(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
        if kwargs['losdelay']:
            # estimated (blue) vs geometric (red) LOS delays
            ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
            ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
        #ax.set_xlim(10,1000)
        if kwargs['xlabel']:
            ax.set_xlabel('Delay [ns] log scale',fontsize=kwargs['fontsize'])
        if kwargs['bcir']:
            # overlay the CIR at the estimated LOS azimuth
            phi = self.angpeak_est*np.pi/180.
            dang = np.abs(self.az - phi)
            u = np.where(dang==np.min(dang))[0][0]
            ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,color='r')
            ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,color='g')
    else:
        if kwargs['raw']:
            ax.plot(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
            ax.plot(np.array([tau]),np.array([AttmaxdB]),color='k')
        if kwargs['desembeded']:
            ax.plot(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
            ax.plot(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
        if kwargs['typic']:
            ax.plot(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'])
        if kwargs['freespace']:
            if kwargs['typic']:
                ax.plot(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='FSPL')
            else:
                ax.plot(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
        if kwargs['losdelay']:
            ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
            ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
        #ax.set_xlim(0,1000)
        if kwargs['xlabel']:
            ax.set_xlabel('Delay (ns)',fontsize=kwargs['fontsize'])
        if kwargs['bcir']:
            phi = self.angpeak_est*np.pi/180.
            dang = np.abs(self.az - phi)
            u = np.where(dang==np.min(dang))[0][0]
            ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,'r')
            ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,'g')
    ll = ax.get_xticklabels()+ax.get_yticklabels()
    for l in ll:
        l.set_fontsize(kwargs['fontsize'])
    if kwargs['ylabel']:
        ax.set_ylabel('level (dB)',fontsize=kwargs['fontsize'])
    ax.set_title(self._filename+' '+str(PL))
    if kwargs['legend']:
        plt.legend(loc='best',fontsize=30)
    return fig,ax,pdp
def pdp(self,**kwargs):
    """ Calculate and plot Power Delay Profile

    Parameters
    ----------
    figsize : tuple (10,10)
    fontsize : int 18
    fig : existing figure or []
    ax : existing axes or []
    xlabel : boolean
    ylabel : boolean
    legend : boolean
    losdelay : boolean
        overlay estimated/geometric LOS delays
    freespace : boolean
        overlay the free space path loss curve
    desembeded : boolean
        overlay gain-de-embedded min/max curves
    typic : boolean
        overlay the typical-gain curve
    semilogx : boolean
    bcir : boolean
        overlay the CIR at the estimated LOS azimuth
    raw : boolean
    bplot : boolean
        if False, skip plotting and return (self.x, pdp)
    Gmax : float 22.68
    Gmin : float 19

    Returns
    -------
    fig, ax            when bplot is True
    (self.x, pdp)      when bplot is False
    """
    defaults = { 'figsize':(10,10),
                 'fontsize':18,
                 'fig' : [],
                 'ax': [],
                 'xlabel': True,
                 'ylabel': True,
                 'legend': True,
                 'losdelay': True,
                 'freespace': True,
                 'desembeded': False,
                 'typic':True,
                 'semilogx':True,
                 'bcir':False,
                 'raw': False,
                 'bplot':True,
                 'Gmax':22.68,
                 'Gmin':19,
                 'Tilt':10,
                 'HPBW':10,
                 'dphi':5,
                 'marker':'*',
                 'color':'k',
                 'label':'',
                 'linewidth':1
               }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    # get antenna gain extremum
    # typical value is chosen as the mean value
    Gmax = kwargs.pop('Gmax')
    Gmin = kwargs.pop('Gmin')
    Gtyp = (Gmax+Gmin)/2.
    # get peak value of the PADP
    # it is assumed that this retrieves the LOS component
    alpha, tau, phi = self.peak()
    # Na : number of angular steps
    Na = self.y.shape[0]
    # pdp : power delay profile
    pdp = np.real(np.sum(self.y*np.conj(self.y),axis=0))
    # delay index of pdp maximum
    u = np.where(pdp==max(pdp))[0]
    # omnidirectional free space path loss
    FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
    AttmaxdB = 20*np.log10(alpha)
    #Gmax = AttmaxdB-FS[u]
    #Gmax_r = np.round(Gmax[0]*100)/100.
    #
    # The -3dB is specific to the Aalto measurement and desembeding (1/2)
    #
    # de-embedded curves : remove antenna gain (min/max/typical)
    pdpdB = 10*np.log10(pdp)
    pdp_min = pdpdB-Gmax-1
    pdp_max = pdpdB-Gmin-1
    pdp_typ = pdpdB-Gtyp-1
    # -118 dB acts as the noise floor for the path loss integration
    umin = np.where(pdp_min>-118)
    pdp_min_thr = pdp_min[umin]
    umax = np.where(pdp_max>-118)
    pdp_max_thr = pdp_max[umax]
    PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
    if kwargs['bplot']:
        if kwargs['fig']==[]:
            fig = plt.figure(figsize=kwargs['figsize'])
        else:
            fig = kwargs['fig']
        if kwargs['ax'] == []:
            ax = fig.add_subplot(111)
        else:
            ax = kwargs['ax']
        if kwargs['semilogx']:
            if kwargs['raw']:
                ax.semilogx(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
                #ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
            if kwargs['desembeded']:
                ax.semilogx(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
                ax.semilogx(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
            if kwargs['typic']:
                ax.semilogx(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
            if kwargs['freespace']:
                if kwargs['typic']:
                    ax.semilogx(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
                else:
                    ax.semilogx(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
            if kwargs['losdelay']:
                # estimated (blue) vs geometric (red) LOS delays
                ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
                ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
            #ax.set_xlim(10,1000)
            if kwargs['xlabel']:
                ax.set_xlabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
            if kwargs['bcir']:
                # overlay the CIR at the estimated LOS azimuth
                phi = self.angpeak_est*np.pi/180.
                dang = np.abs(self.az - phi)
                u = np.where(dang==np.min(dang))[0][0]
                ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,color='r')
                ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,color='g')
        else:
            if kwargs['raw']:
                ax.plot(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=0.5)
                ax.plot(np.array([tau]),np.array([AttmaxdB]),color='k')
            if kwargs['desembeded']:
                ax.plot(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax))
                ax.plot(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin))
            if kwargs['typic']:
                ax.plot(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'])
            if kwargs['freespace']:
                if kwargs['typic']:
                    ax.plot(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
                else:
                    ax.plot(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
            if kwargs['losdelay']:
                ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
                ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
            #ax.set_xlim(0,1000)
            if kwargs['xlabel']:
                ax.set_xlabel('Delay (ns)',fontsize=kwargs['fontsize'])
            if kwargs['bcir']:
                phi = self.angpeak_est*np.pi/180.
                dang = np.abs(self.az - phi)
                u = np.where(dang==np.min(dang))[0][0]
                ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,'r')
                ax.plot(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,'g')
        if kwargs['ylabel']:
            ax.set_ylabel('level (dB)',fontsize=kwargs['fontsize'])
        ax.set_title(self._filename+' '+str(PL))
        if kwargs['legend']:
            plt.legend(loc='best')
        return fig,ax
    else:
        return (self.x,pdp)
def pdp_mamadou(self,**kwargs):
    """ Calculate and plot Power Delay Profile (variant)

    The PDP is the MEAN (not the sum) of the PADP power over angles.

    Parameters
    ----------
    bpdp : boolean
        plot the raw PDP curve
    bcir1 : boolean
        plot the CIR at the estimated LOS azimuth instead
    Gmax, Gmin : float
        antenna gain extrema (dB); their mean is the 'typic' gain
    Remaining keyword arguments control plot styling; see `defaults`.

    Returns
    -------
    fig, ax, pdp
    """
    defaults = { 'figsize':(10,10),
                 'fontsize':18,
                 'fig' : [],
                 'ax': [],
                 'bpdp':True,
                 'xlabel': True,
                 'ylabel': True,
                 'legend': True,
                 'losdelay': False,
                 'freespace': False,
                 'desembeded': False,
                 'typic':False,
                 'semilogx':False,
                 'bcir':False,
                 'bcir1':False,
                 'raw': False,
                 'Gmax':22.68,
                 'Gmin':19,
                 'Tilt':10,
                 'HPBW':10,
                 'dphi':5,
                 'marker':'*',
                 'color':'k',
                 'label':'',
                 'linewidth':1
               }
    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]
    Gmax = kwargs.pop('Gmax')
    Gmin = kwargs.pop('Gmin')
    Gtyp = (Gmax+Gmin)/2.
    # get peak value of the PADP
    alpha,tau,phi = self.peak()
    Na = self.y.shape[0]
    # mean power over the angular axis
    # NOTE(review): y*conj(y) keeps a complex dtype, so pdp is
    # complex with zero imaginary part; np.log10 of it is complex --
    # matplotlib drops the imaginary part. Confirm intended.
    pdp = np.mean(self.y*np.conj(self.y),axis=0)
    spdp = TUchannel(x=self.x,y=np.sqrt(pdp))
    u = np.where(pdp==max(pdp))[0]
    # free space path loss reference (delay -> meters via 0.3 m/ns)
    FS = -(32.4+20*np.log10(self.x*0.3)+20*np.log10(self.fcGHz))
    AttmaxdB = 20*np.log10(alpha)
    #Gmax = AttmaxdB-FS[u]
    #Gmax_r = np.round(Gmax[0]*100)/100.
    #
    # The -3dB is specific to the Aalto measurement and desembeding (1/2)
    #
    # de-embedded curves : remove antenna gain (min/max/typical)
    pdp_min = 10*np.log10(pdp)-Gmax-3
    pdp_max = 10*np.log10(pdp)-Gmin-3
    pdp_typ = 10*np.log10(pdp)-Gtyp-3
    # -118 dB acts as the noise floor for the path loss integration
    umin = np.where(pdp_min>-118)
    pdp_min_thr = pdp_min[umin]
    umax = np.where(pdp_max>-118)
    pdp_max_thr = pdp_max[umax]
    PL = -10*np.log10(np.sum(10**(pdp_min_thr/10.)))
    if kwargs['fig']==[]:
        fig = plt.figure(figsize=kwargs['figsize'])
    else:
        fig = kwargs['fig']
    if kwargs['ax'] == []:
        ax = fig.add_subplot(111)
    else:
        ax = kwargs['ax']
    if kwargs['bpdp']:
        ax.plot(self.x,10*np.log10(pdp),color=kwargs['color'],label=kwargs['label'],linewidth=kwargs['linewidth'])
    if kwargs['bcir1']:
        # CIR at the azimuth bin closest to the estimated LOS angle
        phi = self.angpeak_est*np.pi/180.
        dang = np.abs(self.az - phi)
        u = np.where(dang==np.min(dang))[0][0]
        ax.plot(self.x,20*np.log10(np.abs(self.y[u,:])),color=kwargs['color'],label=kwargs['label'])
    if kwargs['semilogx']:
        if kwargs['raw']:
            ax.semilogx(self.x,10*np.log10(pdp),color='r',label=r'$10\log_{10}(\sum_{\phi} PADP(\phi))$',linewidth=1)
            #ax.semilogx(np.array([tau]),np.array([AttmaxdB]),color='k')
        if kwargs['desembeded']:
            ax.semilogx(self.x,pdp_min,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmax),color='green')
            ax.semilogx(self.x,pdp_max,label=r'$10\log_{10}(\sum_{\phi} PADP(\phi)) - $'+str(Gmin),color='red')
        if kwargs['typic']:
            ax.semilogx(self.x,pdp_typ,label=kwargs['label'],color=kwargs['color'],linewidth=kwargs['linewidth'])
        if kwargs['freespace']:
            if kwargs['typic']:
                ax.semilogx(self.x,FS,color=kwargs['color'],linewidth=kwargs['linewidth']+1,label='Free Space path profile')
            else:
                ax.semilogx(self.x,FS,color='k',linewidth=2,label='Free Space path profile')
        if kwargs['losdelay']:
            # estimated (blue) vs geometric (red) LOS delays
            ax.vlines(self.taupeak_est,ymin=-130,ymax=-40,linestyles='dashed',color='blue')
            ax.vlines(self.taulos_geo,ymin=-130,ymax=-40,linestyles='dashed',color='red')
        #ax.set_xlim(10,1000)
        if kwargs['xlabel']:
            ax.set_xlabel('Delay (ns) log scale',fontsize=kwargs['fontsize'])
        if kwargs['bcir']:
            phi = self.angpeak_est*np.pi/180.
            dang = np.abs(self.az - phi)
            u = np.where(dang==np.min(dang))[0][0]
            ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmax,color='r')
            ax.semilogx(self.x,20*np.log10(np.abs(self.y[u,:]))-Gmin,color='g')
    ll = ax.get_xticklabels()+ax.get_yticklabels()
    for l in ll:
        l.set_fontsize(kwargs['fontsize'])
    if kwargs['ylabel']:
        ax.set_ylabel('PDP [dB]',fontsize=kwargs['fontsize'])
    if kwargs['xlabel']:
        ax.set_xlabel('Propagation Delay [ns]',fontsize=kwargs['fontsize'])
    #ax.set_title(self._filename+' '+str(PL))
    if kwargs['legend']:
        plt.legend(loc='best',fontsize=kwargs['fontsize'])
    return fig,ax,pdp
def tomap(self,L,**kwargs):
""" surimpose PADP on the Layout
Parameters
----------
L : Layout
xmin : 10
xmax : 400
ymin : 10
ymax : 400,
Nx :3000,
Ny :3000,
'cmap':'jet',
'mode':'image',
'excess':'los',
'figsize':(20,20),
'thmindB':-110,
'thmaxdB':-108,
'vmindB':-110,
'vmaxdB':-60,
'offset':0,
'display':True,
'compensated':True,
'tauns_excess':0
"""
defaults = {'xmin':10,
'xmax':400,
'ymin':10,
'ymax':400,
'Nx':3000,
'Ny':3000,
'cmap':'jet',
'mode':'sbounce',
'excess':'los',
'figsize':(20,20),
'thmindB':-110,
'thmaxdB':-108,
'vmindB':-110,
'vmaxdB':-60,
'offset':0,
'display':True,
'compensated':False,
'tauns_excess':0}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
xmin = kwargs.pop('xmin')
ymin = kwargs.pop('ymin')
xmax = kwargs.pop('xmax')
ymax = kwargs.pop('ymax')
mode = kwargs.pop('mode')
vmindB = kwargs.pop('vmindB')
vmaxdB = kwargs.pop('vmaxdB')
thmindB = kwargs.pop('thmindB')
thmaxdB = kwargs.pop('thmaxdB')
Nx = kwargs.pop('Nx')
Ny = kwargs.pop('Ny')
cmap = kwargs.pop('cmap')
offset = kwargs.pop('offset')
excess = kwargs.pop('excess')
display = kwargs.pop('display')
compensated = kwargs.pop('compensated')
tauns_excess = kwargs.pop('tauns_excess')
figsize = kwargs.pop('figsize')
if 'fig' not in kwargs:
fig = plt.figure(figsize=figsize)
else:
fig = kwargs['fig']
#
# Prepare the array for spatial information in horizontal plane x,y
# Nx and Ny should be large enough
#
Z = np.zeros((Nx,Ny),dtype=complex)
#
# spatial indexation in x and y
#
xr = np.linspace(xmin,xmax,Nx)
yr = np.linspace(xmin,xmax,Ny)
# distance Tx Rx in the horizontal plane (2D)
dtx_rx_2D = np.sqrt((self.tx[0]-self.rx[0])**2+(self.tx[1]-self.rx[1])**2)
# distance Tx Rx in the horizontal plane (3D)
dtx_rx = np.sqrt((self.tx[0]-self.rx[0])**2+(self.tx[1]-self.rx[1])**2+(self.tx[2]-self.rx[2])**2)
# distance Tx ground Rx (3D)
dtx_gr_rx = np.sqrt(dtx_rx_2D**2+(self.tx[2]+self.rx[2])**2)
assert(dtx_gr_rx>dtx_rx)
# difference of heights beween Tx and Rx
deltah = np.abs(self.tx[2]-self.rx[2])
#
# Dt = vec(P,Tx)
# Dr = vec(Rx,P)
#
dxt =(self.tx[0]-xr)[:,None]
dyt =(self.tx[1]-yr)[None,:]
#
# nwt : distance between Tx and each point of the plane
# nwr : distance between Rx and each point of the plane
#
nwt = np.sqrt(dxt*dxt+dyt*dyt)
dxr =(xr-self.rx[0])[:,None]
dyr =(yr-self.rx[1])[None,:]
nwr = np.sqrt(dxr*dxr+dyr*dyr)
# dsbounce : elliposidal distance (single bounce hypothesis)
dsbounce = nwt+nwr
# maximal ellipsoidal distance on the Z selected region
dmax = dsbounce.max()
taumax = dmax/0.3
# import ipdb
# ipdb.set_trace()
# determine index of maximal distance
if self.x.max()>taumax:
itaumax = np.where(self.x>taumax)[0][0]
else:
itaumax=len(self.x)-1
# convert maximal distance into maximal delay (self.x is delay)
taumax = self.x[itaumax]
# determine coefficient between delay and index ( ns --> integer)
tau2idx = taumax/itaumax
# Determine the angle of arraival
# direction of arrival normalization of the vector
dxrn = dxr/nwr
dyrn = dyr/nwr
# angle of arrival in [-pi,pi]
phi = np.arctan2(dyrn,dxrn)-offset*np.pi/180
# back in [0-2pi]
phi = (1-np.sign(phi))*np.pi+phi
#iphi=((315-phi*180/np.pi)/5).astype(int)
iphi=((360-phi*180/np.pi)/5).astype(int)
drpt = np.sqrt(dxr*dxr+dyr*dyr+dxt*dxt+dyt*dyt)
dpr = np.sqrt(dxr*dxr+dyr*dyr)
if mode=='sbounce':
iid = np.round((np.sqrt(dxt*dxt+dyt*dyt)+np.sqrt(dxr*dxr+dyr*dyr))/(0.3*tau2idx)).astype('int')
else:
#d = np.round(np.sqrt(dxr*dxr+dyr*dyr)/(0.3*0.625)).astype('int')
#d = np.round(np.sqrt(dxr*dxr+dyr*dyr)/(0.3*0.625)).astype('int')
alpha = np.arctan(deltah/drpt)
dv = dpr/np.cos(alpha)
iid = np.round(dv/(0.3*tau2idx)).astype('int')
#pdb.set_trace()
#
# create indexation for spatial region Z
#
ix = np.arange(Nx)[:,None]
iy = np.arange(Ny)[None,:]
# ird : index for delays (d for delays)
ird = iid[ix,iy].ravel()
# irp : index for directio of arrival (p for phi)
irp = iphi[ix,iy].ravel()
#
# (d < dmax ) and (d>dlos+tauns_excess)
# iphi >= 0 and iphi < Nphimax
ilos = np.round((dtx_rx/(0.3*tau2idx))).astype(int)
iground = np.round((dtx_gr_rx/(0.3*tau2idx))).astype(int)
iexcess = np.round(tauns_excess/tau2idx).astype(int)
if excess=='los':
ud = np.where((ird<itaumax) & (ird>ilos+iexcess))
if excess=='ground':
ud = np.where((ird<itaumax) & (ird>iground+iexcess))
up = np.where((irp>=0) & (irp<len(self.az)))
# determine the index of points in a corona wich satisfy jointly the
# condition on delays and angles
#
u = np.intersect1d(ud,up)
# rebvelize Z (2D -> 1D)
rz = Z.ravel()
# filling rz with self.y nphi,Ntau
rz[u] = self.y[irp[u],ird[u]]
#
# back to matrix form
#
Z = rz.reshape(Nx,Ny)
pdb.set_trace()
lmbda = 0.3/self.fcGHz
sqG = 10
Z_compensated = Z*(4*np.pi*dtx_rx)/(sqG*lmbda)
if compensated:
ZdB = 20*np.log10(np.abs(Z_compensated.T))
else:
ZdB = 20*np.log10(np.abs(Z.T))
mask = ((ZdB.all()>thmindB) and (ZdB.all()<thmaxdB))
#mzdB = ma.masked_array(ZdB,mask)
ZdBmax = ZdB.max()
ZdBmin = ZdB.min()
#
# constructing figure
#
if display:
#fig=plt.figure(figsize=figsize)
fig,ax = L.showG('s',fig=fig,labels=0)
plt.axis('on')
ax.imshow(ZdB,extent=(xr[0],xr[-1],yr[0],yr[-1]),
cmap=cmap,
origin='lower',
alpha=0.9,
vmin=ZdBmax-25,
vmax=ZdBmax,interpolation='nearest')
#plt.imshow(mzdB,alpha=0.9,origin='lower')
ax.plot(self.tx[0],self.tx[1],'or')
ax.plot(self.rx[0],self.rx[1],'ob')
plt.colorbar()
ax.set_title(self._filename)
#plt.savefig(self._filename+'.png')
#return Z,np.linspace(xr[0],xr[-1],Nx),np.linspace(yr[0],yr[-1],Ny)
return fig,ax
def polarplot(self,**kwargs):
""" polar plot of PADP
Parameters
-----------
fig
ax
figsize
typ : string
Ndec : int
decimation factor
imax : int
max value
vmin : float
vmax : float
cmap : colormap
title : PADP
"""
defaults = { 'fig':[],
'ax':[],
'figsize':(10,10),
'typ':'l20',
'Ndec':1,
'vmin':-110,
'vmax':-50,
'imax':150,
'cmap': plt.cm.jet,
'title':'PADP'
}
cvel = 0.3
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs.pop('fig')
if kwargs['ax'] == []:
ax = fig.add_subplot(111,polar=True)
else:
ax = kwargs.pop('ax')
imax = kwargs.pop('imax')
Ndec = kwargs.pop('Ndec')
vmin = kwargs.pop('vmin')
vmax = kwargs.pop('vmax')
cmap = kwargs.pop('cmap')
title = kwargs.pop('title')
rho,theta = np.meshgrid(self.x*cvel,self.az)
# convert y data in desired format
dt,ylabels = self.cformat(**kwargs)
val = dt[:,0::Ndec][:,0:imax/Ndec]
th = theta[:,0::Ndec][:,0:imax/Ndec]
rh = rho[:,0::Ndec][:,0:imax/Ndec]
#vmin = np.min(val)
vmax = np.max(val)
#Dynamic = max_val-vmin
pc = ax.pcolormesh(th,rh,val,cmap=cmap,vmin=vmin,vmax=vmax)
ptx = ax.plot(self.az,self.tau*cvel,'or')
fig.colorbar(pc,orientation='vertical')
ax.set_title(title)
ax.axis('equal')
def toafp(self):
return afp
class TBchannel(bs.TBsignal):
    """ radio channel in non uniform delay domain
    """
    def __init__(self, x=np.array([]), y=np.array([]), label=[]):
        #super(TUsignal,self).__init__(x,y,label)
        bs.TBsignal.__init__(self, x, y, label)

    def tau_Emax(self):
        r""" calculate the delay of the maximum energy peak

        .. math::
            \max_{\tau} y^{2}(\tau)

        Returns
        -------
        tau_Emax : np.ndarray
            delay(s) where the squared signal reaches its maximum
        """
        y2 = (self.y) ** 2
        maxy2 = max(y2)
        u = np.nonzero(y2 == maxy2)[0]
        return self.x[u]

    def tau_moy(self, alpha=0.1, threshold_dB=20, tau0=0):
        """ calculate mean excess delay starting from delay tau0

        Parameters
        ----------
        alpha : float
            kept for backward compatibility (unused in the body)
        threshold_dB : float
            only power above (peak - threshold_dB) contributes
        tau0 : float
            kept for backward compatibility (unused in the body)

        Returns
        -------
        taum : float
            power weighted mean delay (ns)
        """
        u = np.max(self.y * self.y)
        # linear power threshold, threshold_dB below the peak
        v = 10 ** (np.log10(u) - threshold_dB / 10.)
        uf = np.where(self.y * self.y > v)
        num = np.sum(self.y[uf] * self.y[uf] * self.x[uf[-1]])
        den = np.sum(self.y[uf] * self.y[uf])
        taum = num / den
        return(taum)

    def delays(self):
        r""" calculate delay parameters and orthogonality factor from cir

        Returns
        -------
        taum : float
            mean excess delay
        delayspread : float
            rms delay spread
        of : float
            orthogonality factor

        Notes
        -----
        Neelesh Metha, Andreas Molish, Lary Greenstein "Orthogonality
        Factor in WCDMA Downlinks in Urban Macrocellular environments"

        .. :math:

            \beta0 = 1 \frac{\sum_i=1^L}|\alpha_i|^4}{\left(\sum_i=1^L|\alpha_i|^2)^2}
        """
        self.flatteny(reversible=True)
        y2 = self.yf * self.yf
        y4 = y2 * y2
        # np.sum : the builtin sum has no `axis` keyword and raised
        # TypeError in the original
        taum = np.sum(self.x * y2, axis=0) / np.sum(y2, axis=0)
        delayspread = np.sqrt(np.sum((self.x - taum) * (self.x - taum) * y2, axis=0) /
                              np.sum(y2, axis=0))
        of = 1 - np.sum(y4, axis=0) / np.sum(y2, axis=0) ** 2
        return taum, delayspread, of

    def Kfactor(self, threshold_dB=20, dB=True):
        """ determine Ricean K factor

        Parameters
        ----------
        threshold_dB : float
            Only the energy above (peak - threshold_dB) is taken into account
        dB : boolean
            if True value in dB is returned

        Returns
        -------
        K : float
            ratio of the dominant path power over the remaining power
        """
        u = np.max(self.y * self.y)
        v = 10 ** (np.log10(u) - threshold_dB / 10.)
        vmax = np.where(self.y * self.y == u)
        Pmax = self.y[vmax] * self.y[vmax]
        uf = np.where(self.y * self.y > v)
        Ptot = np.sum(self.y[uf] * self.y[uf])
        K = Pmax / (Ptot - Pmax)
        if dB:
            K = 10 * np.log10(K)
        return K[0]

    def tau_rms(self, alpha=0.1, threshold_dB=20, tau0=0):
        r""" calculate root mean square delay spread starting from delay tau_0

        Parameters
        ----------
        alpha : float
        threshold_dB : float
            ( delay interval is defined between :math:`\tau(\alpha)` and :math:`\tau(1 -\alpha)` )
        tau0 : float
            argument for specifying the delay start

        Notes
        -----
        .. math::
            \sqrt{\frac{\int_{\tau(\alpha)}^{\tau(1-\alpha)} (\tau-\tau_m)^{2} PDP(\tau) d\tau} {\int_{\tau(\alpha)}^{\tau(1-\alpha)} PDP(\tau) d\tau}}

        See Also
        --------
        TUsignal.ecdf
        TUsignal.tau_moy
        """
        u = np.max(self.y * self.y)
        v = 10 ** (np.log10(u) - threshold_dB / 10.)
        uf = np.where(self.y * self.y > v)
        # keyword call : the original passed tau0 positionally, silently
        # binding it to tau_moy's alpha parameter
        taum = self.tau_moy(threshold_dB=threshold_dB, tau0=tau0)
        num = np.sum(self.y[uf] * self.y[uf] * (self.x[uf[-1]] - taum) ** 2)
        den = np.sum(self.y[uf] * self.y[uf])
        taurms = np.sqrt(num / den)
        return taurms

    def toFD(self, fGHz=np.linspace(2, 5, 256)):
        """ Transform to Frequency domain

        Parameters
        ----------
        fGHz : np.ndarray
            frequency base (GHz)

        Returns
        -------
        H : Tchannel
        """
        # discrete Fourier sum over the non uniform delay base
        z = np.sum(self.y[:, None] * np.exp(-2 * 1j * fGHz[None, :] * np.pi * self.x[:, None]),
                   axis=0)
        H = Tchannel(x=fGHz, y=z, tau=self.x)
        return H

    def SalehValenzuela(self, **kwargs):
        """ generic Saleh and Valenzuela Model

        Parameters
        ----------
        Lam : float
            clusters Poisson Process rate (1/ns)
        lam : float
            rays Poisson Process rate (1/ns)
        Gam : float
            clusters exponential decay factor
        gam : float
            rays exponential decay factor
        T : float
            observation duration (ns)

        Examples
        --------

        >>> from pylayers.antprop.channel import *
        >>> C=TBchannel()
        >>> C.SalehValenzuela()
        >>> f,a = C.stem()
        """
        defaults = {'Lam': .1,
                    'lam': .5,
                    'Gam': 30,
                    'gam': 5,
                    'T': 100}
        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]
        Lam = kwargs['Lam']
        lam = kwargs['lam']
        Gam = kwargs['Gam']
        gam = kwargs['gam']
        T = kwargs['T']
        # oversized draw counts, cast to int (.rvs rejects float sizes)
        Nr = int(1.2 * T / Lam)
        Nc = int(1.2 * T / lam)
        # exponential inter-arrival times
        # scale= : expon(1./Lam) would set the loc parameter, not the scale
        e1 = st.expon(scale=1. / Lam)
        e2 = st.expon(scale=1. / lam)
        # cluster times of arrival, limited to the observation window
        tc = np.cumsum(e1.rvs(Nr))
        tc = tc[np.where(tc < T)]
        Nc = len(tc)
        # each cluster start repeated for its Nr candidate rays
        tauc = np.kron(tc, np.ones((1, Nr)))[0, :]
        # rays times of arrival relative to their cluster
        # order='F' keeps the per-cluster (column) grouping aligned with tauc
        taur = np.cumsum(e2.rvs((Nr, Nc)), axis=0).ravel(order='F')
        # exponential decays of cluster and rays
        etc = np.exp(-tauc / (1.0 * Gam))
        etr = np.exp(-taur / (1.0 * gam))
        et = etc * etr
        tau = tauc + taur
        # single mask : filtering tau first and re-deriving the mask from
        # the filtered array desynchronized et and tau in the original
        u = np.where(tau < T)[0]
        tau = tau[u]
        et = et[u]
        # reorder in the delay domain, random path polarity
        v = np.argsort(tau)
        taus = tau[v]
        ets = et[v] * np.sign(np.random.rand(len(v)) - 0.5)
        # delays and amplitudes
        self.x = taus
        self.y = ets
class TUchannel(TBchannel,bs.TUsignal):
""" Uniform channel in delay domain
"""
    def __init__(self, x=np.array([]), y=np.array([]), label=[]):
        # calling super(TBchannel, ...) deliberately skips TBchannel in
        # the MRO so that initialisation is delegated to the uniform
        # signal base (bs.TUsignal) rather than bs.TBsignal
        # NOTE(review): assumes MRO (TUchannel, TBchannel, bs.TUsignal, ...) — confirm
        super(TBchannel,self).__init__(x,y,label)
    def toa_max2(self):
        """ calculate time of arrival (max2 method)

        Scan a decreasing sequence of thresholds below the global
        maximum and plot, versus the relative threshold, the number of
        contiguous intervals of the signal exceeding it (whole signal,
        and left of the maximum only).

        NOTE(review): relies on the bare names ``array`` and ``nbint``,
        presumably provided by a star import / module level helper —
        verify, otherwise this raises NameError.
        """
        # thresholds and interval counts accumulated along the scan
        THRE = array([])
        V = array([])
        VL = array([])
        # global maximum and its index
        M = max(self.y)
        n = np.nonzero(self.y == M)[0]
        thre = M
        v = 1
        vl = 0
        THRE = np.hstack((THRE, thre))
        V = np.hstack((V, v))
        VL = np.hstack((VL, vl))
        # threshold lowered by 1% of the maximum at each step
        step = M / 1e2
        thre = M - step
        # while thre > M/1e2:
        while vl < 20:
        # while v < 50:
            # samples above the current threshold
            u = np.nonzero(self.y > thre)[0]
            # number of contiguous intervals above threshold
            v = nbint(u)
            # same count restricted to the left of the maximum
            h = np.nonzero(u > n)[0]
            g = np.delete(u, h)
            vl = nbint(g) - 1
            THRE = np.hstack((THRE, thre))
            V = np.hstack((V, v))
            VL = np.hstack((VL, vl))
            thre = thre - step
        plt.plot(1 - THRE / M, V, 'b', drawstyle='steps',
                 label='interval number')
        plt.plot(1 - THRE / M, VL, '-r', drawstyle='steps',
                 label='interval(Left) number')
        plt.xlabel('Gamma/Vmax')
        plt.legend(loc=2)
        # ylabel('Interval Number')
        plt.show()
    def toa_new(self):
        """ estimate time of arrival (new method)

        Lower a threshold from the maximum ; each time a new contiguous
        interval appears left of the current reference index, record its
        first index.  Stop when the accumulated descent between two such
        events exceeds 4% of the maximum, then return the delay of the
        third last recorded index.

        Returns
        -------
        tau : delay estimate of the first path

        NOTE(review): uses the bare name ``nbint`` (presumably
        pyu.nbint via a star import) — verify.
        """
        t = self.x
        Max = max(self.y)
        nmax = np.nonzero(self.y == Max)[0]
        n = nmax
        # threshold lowered by 1% of the maximum per iteration
        step = Max / 1e2
        thre = Max - step
        delta = 100
        d = 0
        nint = 0
        # indices of candidate first paths
        N = np.array([])
        N = np.hstack((N, n))
        while delta > 4 * Max / 1e2:
            u = np.nonzero(self.y > thre)[0]
            # restrict to samples left of the current reference index
            hr = np.nonzero(u > n)[0]
            g = np.delete(u, hr)
            if nmax >= 6000:
                # limit the search window to 6000 samples before the
                # maximum (6000 * 0.005 ns = 30 ns)
                hl = np.nonzero(g < nmax - 6000)[0]
                u = np.delete(g, hl)
            else:
                u = g
            # number of additional contiguous intervals above threshold
            n_int = nbint(u) - 1
            if n_int == 0:
                # no new interval : accumulate the descent
                d = d + step
            else:
                # new interval : record the descent and its first index
                delta = d + step
                d = 0
                n = u[0]
                N = np.hstack((N, n))
                #print(N)
            thre = thre - step
            if thre < 0:
                break
        if len(N) >= 3:
            nn = N[-3]
        else:
            nn = N[0]
        tau = t[nn]
        return tau
    def toa_win(self, w):
        """ calculate time of arrival (window method)

        Parameters
        ----------
        w : parameter between 0 and 100
            Lei takes w = 9

        Notes
        -----
        Same descending-threshold scan as toa_new, but the stopping
        criterion is referenced to the noise maximum estimated on the
        first 1000 samples.

        NOTE(review): uses the bare name ``nbint`` (presumably
        pyu.nbint via a star import) — verify.
        """
        t = self.x
        # noise level estimated on the first 1000 samples
        maxbruit = max(self.y[0:1000])
        Max = max(self.y)
        nmax = np.nonzero(self.y == Max)[0]
        n = nmax
        # threshold lowered by 1% of the maximum per iteration
        step = Max / 1e2
        thre = Max - step
        delta = 100
        d = 0
        nint = 0
        # indices of candidate first paths
        N = np.array([])
        N = np.hstack((N, n))
        # while delta is larger than w% of Max
        while delta > w * Max / 1e2:
            u = np.nonzero(self.y > thre)[0]
            # restrict to samples left of the current reference index
            hr = np.nonzero(u > n)[0]
            g = np.delete(u, hr)
            if nmax >= 6000:
                # limit the window to 6000 samples (6000*0.005 = 30 ns)
                hl = np.nonzero(g < nmax - 6000)[0]
                u = np.delete(g, hl)
            else:
                u = g
            # number of additional contiguous intervals above threshold
            n_int = nbint(u) - 1
            if n_int == 0:
                thre = thre - step
                d = d + step
            else:
                # descent relative to the estimated noise level
                delta = Max - maxbruit - d - step
                d = d + step
                n = u[0]
                N = np.hstack((N, n))
                thre = thre - step
            if thre < 0:
                break
        if len(N) >= 2:
            nn = N[-2]
        else:
            nn = N[0]
        tau = t[nn]
        return tau
def toa_max(self, nint):
""" calculate time of arrival
descendant threshold based toa estimation
Parameters
----------
nint : integer
number of intervals
"""
#
# seek fot the maximum value of the signal
#
M = self.y.max()
step = M / 1e2
# plot(self.x,self.y)
thre = M - step
while step > M / 1e5:
# axhline(y=thre,color='green')
u = np.where(self.y > thre)[0]
# nbint : number of contiguous intervals
if pyu.nbint(u) < nint:
# down
thre = thre - step
else:
# up + step reduction
thre = thre + step
step = step / 2.
# plt.show()
tau = self.x[u[0]]
return tau
def toa_th(self, thlos, thnlos, visibility=0):
""" calculate time of arrival
threshold based toa estimation using energy peak
"""
#
# ( ) ^2
#
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
if visibility == 'LOS':
th = thlos * maxy2
else:
th = thnlos * maxy2
#
#In the W1-M1 measurement
#thlos=0.05 thnlos=0.15
#
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_cum(self, th):
""" calculate time of arrival
threshold based toa estimation using cumulative energy
"""
t = self.x
y = self.y
cdf, vary = self.ecdf()
#
#In the W1-M1 measurement th=0.15
#
v = np.nonzero(cdf.y >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tmtm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = (np.sqrt(self.Etot()) - np.sqrt(self.Emax())) / \
(np.sqrt(self.Etot()) + np.sqrt(self.Emax()))
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = np.sqrt(self.Emax()) / np.sqrt(self.Etot())
print(alpha)
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_th_tmt(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
maxy2 = max(y2)
t = self.x
alpha = (np.sqrt(self.Etot(
)) - np.sqrt(self.Emax())) / np.sqrt(self.Etot())
print(alpha)
th = alpha * maxy2
v = np.nonzero(y2 >= th)[0]
toa = t[v[0]]
return toa
def toa_cum_tm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = np.sqrt(cdf.y[u]) / np.sqrt(cdf.y[-1])
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def toa_cum_tmtm(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = (np.sqrt(cdf.y[-1]) - np.sqrt(
cdf.y[u])) / (np.sqrt(cdf.y[-1]) + np.sqrt(cdf.y[u]))
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def toa_cum_tmt(self):
""" calculate time of arrival
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
cdf, vary = self.ecdf()
alpha = (np.sqrt(cdf.y[-1]) - np.sqrt(cdf.y[u])) / np.sqrt(cdf.y[-1])
v = np.nonzero(cdf.y >= alpha * cdf.y[u])[0]
toa = t[v[0]]
return toa
def psd(self, Tpns=100, R=50,periodic=True):
""" calculate power spectral density
Parameters
----------
R : Resistance (default 50 Ohms)
Ohms
Tpns : real
Signal period PRP (default 100 ns)
.. note::
Notice this interesting property that if time is represented in ns
the resulting PSD is expressed in dBm/MHz because there is the
same scale factor 1e-9 between second and nanosecond as between
dBW/Hz and dBm/MHz
If periodic is False the signal duration is taken as period.
"""
P = self.esd(mode='unilateral')
if periodic:
P.y = P.y / (R * Tpns)
else:
P.y = P.y/ (R* (P.x[-1]-P.x[0]))
return P
def awgn(self,PSDdBmpHz=-174,snr=0,seed=1,typ='psd',R=50):
""" add a white Gaussian noise
Parameters
----------
PSDdBmpHz : float
snr : float
seed : float
typ : string
'psd' | 'snr'
R : float
Returns
-------
n
sn
See Also
--------
bsignal.Noise
"""
ti = self.x[0]
tf = self.x[-1]
tsns = self.x[1]-self.x[0]
fsGHz = 1./tsns
if typ=='snr':
Ps = self.energy()/(R*(tf-ti))
PW = Ps/10**(snr/10.)
pWpHz = PW/(fsGHz*1e9)
pmWpHz = pWpHz*1e3
PSDdBmpHz = 10*np.log10(pmWpHz)
n = Noise(ti = ti,
tf = tf+tsns,
fsGHz = fsGHz,
PSDdBmpHz = PSDdBmpHz,
R = R,
seed = seed)
sn.y = self.y + n.y[0:len(self.x)]
sn.x = self.x
return sn,n
def Etau0(self, tau0=0.0, Tint=1, sym=0.25, dB=True):
""" calculate energy around delay tau0
Parameters
----------
tau0 : (ns) (0)
Tint : Integration time (ns) (1) include the system error
sym : symetrie factor 0.5 = symetric (0.25)
dB : logscale indicator (True)
"""
#u = nonzero((tau0 + Tint*(1-sym) > self.x) & (self.x > tau0 - Tint*sym))
u = nonzero((tau0 + Tint > self.x) & (self.x > tau0))
etau0 = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
etau0 = 10 * np.log10(etau0)
return(etau0)
def Ewin(self, tau, Tint=1, sym=0.25, dB=False):
""" integrate energy around delay tau
Parameters
----------
tau : (ns) (0)
Tint : Integration time (ns) (1) include the system error
sym : symetrie factor 0.5 = symetric (0.25)
dB : logscale indicator (True)
"""
tstart = tau - Tint * sym
tstop = tau + Tint * (1 - sym)
u = np.nonzero((self.x > tstart) & (self.x < tstop))
energy = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
energy = 10 * np.log10(energy)
return(energy)
def Etot(self, tau0=0.0, taumax=200, dB=False):
""" Etot calculate the energy of the signal
Parameters
----------
tau0 : start value for integration
dB : (False default) if True value in dB
usage :
s.Etot(tau0=10,dB=True)
"""
u = (self.x > tau0) & (self.x < taumax)
etot = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
etot = 10 * np.log10(etot)
return(etot)
def Efirst(self, toa, Tint=1, sym=0.25, dB=True):
""" calculate the energy of the first path
Parameters
----------
toa : float
delay value
Tint : float
duration value (1)
sym : float
symmetry around delay value ( 0.25)
dB : Boolean
Returns
-------
Efirst : Energy amount in the window (in dB if dB)
"""
u = np.nonzero((toa + Tint > self.x) & (self.x > toa))
efirst = self.dx() * sum(self.y[u] * np.conj(self.y[u]))
if dB:
efirst = 10 * np.log10(efirst)
return(efirst)
def Efirst_corr(self, tau0, Sx, Sy, dB=True):
""" calculate Efirst utilizing the correlation of signal emission et reponse impulsionnelle
Parameters
----------
tau0
Sx
Sy
dB
"""
te = self.dx()
E0 = sum(Sy * Sy) * te
n = int(np.ceil(tau0 / te))
Correlation = np.correlate(self.y, Sy, mode='full')
seuil = max(Correlation[len(Sx):len(Sx) + n - 200])
v = np.nonzero(Correlation[len(Sx) + n - 200:] > seuil)[0]
if len(v) == 0:
ff = seuil / E0
else:
w = v[1:] - v[0:-1]
w0 = np.nonzero(w != 1)[0]
if len(w0) == 0:
ff = max(Correlation[len(Sx) + n - 200:][v]) / E0
else:
vv = v[0:w0[0] + 1]
ff = max(Correlation[len(Sx) + n - 200:][vv]) / E0
if dB:
Ef = 20 * np.log10(ff)
return(Ef)
    def Efirst_toath(self, tau0, Tint=1, sym=0.25, dB=True):
        """ calculate energy of the first path from a time of flight

        Parameters
        ----------
        tau0 : Time of flight
        Tint : float
            integration time (ns)
        sym : float
            symmetry factor of the integration window
        dB : if True return value in dBnJ
        """
        te = self.dx()
        # sample index of the time of flight
        n = int(np.ceil(tau0 / te))
        # noise level : maximum amplitude before the time of flight
        seuil = max(self.y[:n])
        v = np.nonzero(self.y[n:] > seuil)[0]
        if len(v) == 0:
            # nothing above the noise level : fall back on the TOF itself
            toa = n * te
        else:
            # keep only the first contiguous run of indices above the level
            w = v[1:] - v[0:-1]
            w0 = np.nonzero(w != 1)[0]
            if len(w0) == 0:
                r = max(self.y[n:][v])
                # NOTE(review): np.nonzero returns an index array, so toa
                # is an array here — verify callers accept that
                toa = np.nonzero(self.y == r)[0] * te
            else:
                vv = v[0:w0[0] + 1]
                r = max(self.y[n:][vv])
                toa = np.nonzero(self.y == r)[0] * te
        # integrate the energy over [toa - sym*Tint, toa + (1-sym)*Tint]
        u = np.nonzero((toa + Tint * (1 - sym) > self.x) & (
            self.x > toa - Tint * sym))
        efirst = te * sum(self.y[u] * np.conj(self.y[u]))
        if dB:
            efirst = 10 * np.log10(efirst)
        return(efirst)
def Epercent(self, N=10):
""" return N percentile delay of a cdf
Parameters
----------
N : 10
"""
cdf, vary = self.ecdf()
t = cdf.x
Cdf = cdf.y
pc = array([])
for i in range(N - 1):
u = np.nonzero(Cdf > (i + 1.) / N)
tp = t[u[0]]
pc = np.hstack((pc, tp))
return(pc)
    def Emax(self, Tint=1, sym=0.5, dB=False):
        """ calculate the maximum of Energy integrated over a duration Tint

        A symetry of sym around the max value of the squared signal

        Parameters
        ----------
        Tint: float
            Integration time (ns) default 1
        sym : float
            Symmetry factor (default 0.5)
        dB : boolean
            default False

        Notes
        -----
        W1-M1
        te     = 0.005 ns
        left   = 12
        Nright = 33
        Tint   = 45*te = 0.225 ns
        sym    = 0.25
        """
        # squared signal
        y2 = (self.y) ** 2
        # index of the maximum value of ()^2
        maxy2 = max(y2)
        u = np.nonzero(y2 == maxy2)[0]
        # integration window expressed in samples, split sym / (1 - sym)
        te = self.dx()
        Npt = int(np.ceil(Tint / te))
        Nleft = int(np.ceil(sym * Npt))
        Nright = int(np.ceil((1 - sym) * Npt))
        #
        # Integration around the maximum value of E^2
        #  In the W1_M1 measurement
        #  te     = 0.005 ns
        #  Nleft  = 12
        #  Nright = 33
        #  Tint   = 45*te = 0.225 ns
        #  sym    = 0.25
        #
        # NOTE(review): u is an index array ; the slice assumes the
        # maximum is reached at a single sample — verify
        Y = y2[u - Nleft:u + Nright]
        cumY = np.cumsum(Y)
        maxY = cumY[-1]
        Emax = maxY * te
        if dB:
            return(10 * np.log10(Emax))
        return(Emax)
def tau_Emax(self):
""" calculate the delay of max energy peak
"""
y2 = (self.y) ** 2
t = self.x
maxy2 = max(y2)
u = np.nonzero(y2 == maxy2)[0]
tau_Emax = t[u]
return(tau_Emax)
def aggcir(self,alphak,tauk):
""" aggregation of CIR from (alphak,tauk)
Parameters
----------
alphak : ndarray
CIR path amplitude
tauk : ndarray
CIR delay values
Examples
--------
.. plot::
:include-source:
>>> from pylayers.signal.bsignal import *
>>> import numpy as np
>>> alphak = 10*np.random.rand(7)
>>> tauk = 100*np.random.rand(7)
>>> tau = np.arange(0,150,0.1)
>>> y = np.zeros(len(tau))
>>> # CIR = TUsignal(tau,y)
>>> # CIR.aggcir(alphak,tauk)
>>> # f,a =CIR.plot(typ=['v'])
"""
shy = np.shape(self.y)
x = self.x
eps = (x[1]-x[0])/2
u = map(lambda t: np.where( (x>t-eps) & (x<=t+eps))[0][0],tauk)
ynew = np.zeros(len(x))
ynew[u] = alphak
if len(shy)>1:
self.y = np.vstack((self.y,ynew))
else:
self.y = ynew[None,:]
self.y = np.delete(self.y,0,0)
    def readcir(self,filename,outdir=[]):
        """ read channel impulse response

        Parameters
        ----------
        filename : string
            long file name if outdir is []
            short file name is outdir is != []
        outdir : string
            output directory (resolved under 'output/')
        """
        if outdir != []:
            # resolve the short name inside the project output directory
            outdir = 'output/'+outdir
            filename = getlong(filename, outdir)
        cir = ios.loadmat(filename)
        # delay base (ns) and channel impulse response values
        self.x = cir['t'].ravel()
        self.y = cir['cir'].ravel()
def readuwb(self, _filename):
""" read Waveform from Matlab file
Parameters
----------
_filename : file name with extension (.mat)
"""
outdir = 'output/'+outdir
filename = getlong(_filename, outdir)
wfm = ios.loadmat(filename)
d = wfm['data'][0][0]
T0 = d.T0[0][0] / 1e-9
Tres = d.Tres[0][0] / 1e-9
s = d.WformOut1
N = len(s)
self.x = np.linspace(T0, T0 + (N - 1) * Tres, N)
self.y = s.reshape(len(s))
    def ecdf(self, Tnoise=10, rem_noise=True, in_positivity=True, display=False, normalize=True, delay=0):
        """ calculate energy cumulative density function

        Parameters
        ----------
        Tnoise :
            Time duration of noise only portion (default=10ns)
        rem_noise :
            remove noise if True
        in_positivity :
            inforce positivity if True
        normalize :
            normalize if True
        display :
            display ecdf if True
        delay :
            give a delay for vizualization

        Returns
        -------
        ecdf : bs.TUsignal
            (normalized) cumulated energy vs delay
        vary : float
            noise variance estimated over the first Tnoise ns
        """
        # squared signal cumulated over the (uniform) time base
        t = self.x
        y = self.y
        te = self.dx()
        y2 = y ** 2
        #
        f1 = np.cumsum(y2) * te
        # retrieve the noise only portion at the beginning of TUsignal
        #
        Nnoise = int(np.ceil(Tnoise / te))
        tn = t[0:Nnoise]
        fn = f1[0:Nnoise]
        # noise variance estimated on the noise-only portion
        stdy = np.std(y[0:Nnoise])
        vary = stdy * stdy
        # linear model of the noise energy ramp over the whole time base
        y = t * vary
        #
        # y : linear interpolation of noise ecdf (over whole time base)
        #
        #(ar,br)= polyfit(tn,fn,1)
        #print ar
        #y = polyval([ar,br],t)
        if rem_noise:
            # subtract the modelled noise contribution
            f = f1 - y
        else:
            f = f1
        #
        # inforce positivity
        #
        if in_positivity:
            # clamp any locally decreasing step of the cdf to zero
            pdf = np.diff(f)
            u = np.nonzero(pdf < 0)[0]
            pdf[u] = 0
            ecdf = np.cumsum(pdf)
        else:
            ecdf = f
        #
        # Normalization step
        #
        E = ecdf[-1]
        #print E
        if normalize:
            ecdf = ecdf / E
        #
        # Resizing : diff() may have shortened the array by one sample
        #
        Nt = len(t)
        Necdf = len(ecdf)
        N = min(Nt, Necdf)
        ecdf = bs.TUsignal(t[0:N], ecdf[0:N])
        #
        # Display
        #
        if display:
            plt.subplot(211)
            ecdf.plot()
            # +/- confidence envelope around the noise ramp
            if normalize:
                plt.plot(t, 2 * vary * np.sqrt(2 * t) / E, 'r')
                plt.plot(t, -2 * vary * np.sqrt(2 * t) / E, 'r')
            else:
                plt.plot(t, 3 * vary * np.sqrt(2 * t), 'r')
                plt.plot(t, -3 * vary * np.sqrt(2 * t), 'r')
            plt.axvline(x=delay, color='red')
            plt.subplot(212)
            plt.plot(t, y, color='red')
            plt.plot(t, f1, color='black')
            plt.plot(t, f, color='blue')
            plt.show()
        return ecdf, vary
class TUDchannel(TUchannel):
    """ Uniform channel in Time domain with delay

    Attributes
    ----------
    x : ndarray
    y : ndarray
    taud : ndarray
        direct delay
    taue : ndarray
        excess delay
    """
    def __init__(self, x=np.array([]), y=np.array([]), taud=np.array([]), taue=np.array([])):
        super(TUDchannel, self).__init__(x, y)
        #TUsignal.__init__(self, x, y)
        self.taud = taud
        self.taue = taue

    def __repr__(self):
        s1 = "Time domain channel with delay \n"
        s = TUchannel.__repr__(self)
        s = s1 + s
        return(s)

    def fig(self, N):
        """ plot a figure of the N first signals

        Parameters
        ----------
        N : int
            number of y signal to plot (clipped to the number of rows)

        Notes
        -----
        The original implementation used the unresolved name ``minimum``,
        built an invalid subplot specification and indexed a 1-D row with
        two indices ; it could not run.  It now draws the first
        min(N, Nmax) rows of ``self.y`` in stacked subplots.
        """
        x = self.x
        sh = np.shape(self.y)
        Nmax = sh[0]
        # clip the request to the available number of signals
        N1 = int(min(N, Nmax))
        for k in range(N1):
            plt.subplot(N1, 1, k + 1)
            plt.plot(x, self.y[k, :])
class Mchannel(bs.FUsignal):
    """ Handle the measured channel

    Attributes
    ----------
    Nm, Nr, Nt, Nf : int
        number of measurements, receive antennas, transmit antennas and
        frequency points (derived from the shape of y)
    """
    def __init__(self,
                 x,
                 y,
                 **kwargs):
        """ class constructor

        Parameters
        ----------
        x : np.ndarray (nfreq,)
            frequency GHz
        y : np.ndarray (Nm x Nr x Nt x Nf)
            measured channel
        Aat, Aar : list
            transmit / receive antenna array description (optional)
        calibrated : bool
        label, filename, mes : string
        """
        defaults = {
            'Aat': [],
            'Aar': [],
            'calibrated': True,
            'label': '',
            'filename': '',
            'mes': ''
        }
        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]
        self.calibrated = kwargs.pop('calibrated')
        self.label = kwargs.pop('label')
        self.filename = kwargs.pop('filename')
        self.mes = kwargs.pop('mes')
        self.Aat = kwargs.pop('Aat')
        self.Aar = kwargs.pop('Aar')
        # tensor dimensions : measurements x Nr x Nt x frequencies
        sh = y.shape
        self.Nm = sh[0]
        self.Nr = sh[1]
        self.Nt = sh[2]
        self.Nf = sh[3]
        bs.FUsignal.__init__(self, x=x, y=y, label='Mchannel')

    def __repr__(self):
        st = bs.FUsignal.__repr__(self)
        if self.calibrated:
            st = st + 'Calibrated'
        else:
            st = st + 'Not calibrated'
        return(st)

    def eig(self, HdH=False):
        """ calculate eigen values of the transfer matrix.

        It involves H and Hd against svd() which acts only over H.

        Parameters
        ----------
        HdH : bool
            if True diagonalize Hd.H (nt x nt), else H.Hd (nr x nr)

        Returns
        -------
        U : Unitary tensor (nm x nf x (nr|nt) x (nr|nt))
        S : Singular values (nm x nf x (nr|nt))
        V : = Ud (because the matrix is Hermitian) (nm x nf x (nr|nt) x (nr|nt))

        HdH = U L U^{\\dagger}
        """
        # H  : nm x nr x nt x nf
        H = self.y
        # Hd : nm x nt x nr x nf
        Hd = np.conj(self.y.swapaxes(1, 2))
        if HdH:
            # T : nm x nt x nt x nf
            T = np.einsum('uijk,ujlk->uilk', Hd, H)
        else:
            # T : nm x nr x nr x nf
            T = np.einsum('uijk,ujlk->uilk', H, Hd)
        # move the frequency axis forward : nm x nf x . x .
        T = T.swapaxes(1, 3)
        U, S, V = la.svd(T)
        return (U, S, V)

    def Bcapacity(self, Pt=np.array([1e-3]), Tp=273):
        """ calculates BLAST deterministic MIMO channel capacity

        Parameters
        ----------
        Pt : np.array (,NPt)
            the total power is assumed uniformly distributed over the whole bandwidth
        Tp : Receiver Temperature (K)

        Returns
        -------
        rho : SNR
            np.array (Nf,Nt,NPt)
        CB : sum rate or spectral efficiency (bit/s)
            np.array (Nf,NPt)

            log_2(det(I+(Et/(N0Nt))HH^{H})

        Notes
        -----
        The returned value is homogeneous to bit/s ; the aggregated
        capacity is obtained by a simple summation of the returned
        quantity.  To obtain the sum rate or the spectral efficiency in
        (bit/s/Hz) the returned value should be divided by the frequency
        step dfGHz.
        """
        fGHz = self.x
        Nf = len(fGHz)
        BGHz = fGHz[-1] - fGHz[0]
        dfGHz = fGHz[1] - fGHz[0]
        # isinstance also accepts float subclasses (e.g. np.float64)
        if isinstance(Pt, float):
            Pt = np.array([Pt])
        # White noise definition
        #
        # Boltzmann constant (J/K)
        # (fixed : the original used 1.03806488e-23, a typo for the
        #  CODATA value 1.3806488e-23)
        kB = 1.3806488e-23
        # N0 ~ J ~ W/Hz ~ W.s
        N0 = kB * Tp
        # Evaluation of the transfer tensor (HdH)
        U, S, V = self.eig(HdH=True)
        # power uniformly split over the transmit antennas
        Ps = Pt / (self.Nt)
        # noise power over the whole bandwidth (Watt)
        Pb = N0 * BGHz * 1e9
        # S   : nm x nf x nr
        # rho : nm x nf x nr x power
        rho = (Ps[None, None, None, :] / Pb) * S[:, :, :, None]
        CB = dfGHz * np.sum(np.log(1 + rho) / np.log(2), axis=2)
        return (rho, CB)

    def WFcapacity(self, Pt=np.array([1e-3]), Tp=273):
        """ calculates deterministic MIMO channel capacity (water filling)

        Parameters
        ----------
        Pt : np.array
            the total power to be distributed over the different spatial
            channels using water filling
        Tp : Receiver Noise Temperature (K)

        Returns
        -------
        rho : SNR (in linear scale)
        Cwf : capacity (bit/s)

            log_2(det(It + HH^{H})
        """
        fGHz = self.x
        Nf = len(fGHz)
        # Bandwidth and frequency step
        BGHz = fGHz[-1] - fGHz[0]
        dfGHz = fGHz[1] - fGHz[0]
        # White noise definition
        #
        # Boltzmann constant (J/K) — same typo fix as in Bcapacity
        kB = 1.3806488e-23
        # N0 ~ J ~ W/Hz ~ W.s
        N0 = kB * Tp
        # Evaluation of the transfer HdH tensor
        U, ld, V = self.eig(HdH=True)
        #
        # Iterative implementation of the Water Filling algorithm
        #
        # pb : (nm,nf,nt) noise power (Watt)
        pb = N0 * dfGHz * 1e9 * np.ones((self.Nm, self.Nf, self.Nt))
        # pt : (nm,nf,nt,power) total power uniformly spread over (nt*(nf-1))
        pt = Pt[None, None, None, :] / ((self.Nf - 1) * self.Nt)
        mu = pt
        Q0 = np.maximum(0, mu - pb[:, :, :, None] / ld[:, :, :, None])
        u = np.where(Q0 > 0)[0]
        Peff = np.sum(np.sum(Q0, axis=1), axis=1)
        deltamu = pt
        # raise the water level until the effective power matches Pt
        while (np.abs(Peff - Pt) > 1e-16).any():
            mu = mu + deltamu
            Q = np.maximum(0, mu - pb[:, :, :, None] / ld[:, :, :, None])
            Peff = np.sum(np.sum(Q, axis=1), axis=1)
            #print "mu , Peff : ",mu,Peff
            # overshoot : step back and halve the level increment
            usup = np.where(Peff > Pt)[0]
            mu[:, :, :, usup] = mu[:, :, :, usup] - deltamu[:, :, :, usup]
            deltamu[:, :, :, usup] = deltamu[:, :, :, usup] / 2.
        Qn = Q / pb[:, :, :, None]
        rho = Qn * ld[:, :, :, None]
        Cwf = dfGHz * np.sum(np.log(1 + rho) / np.log(2), axis=2)
        return (rho, Cwf)

    def plot2(self, fig=[], ax=[], mode='time'):
        """ plot the mean transfer function (or CIR) over measurements

        Parameters
        ----------
        fig : matplotlib figure (current figure when [])
        ax : matplotlib axes (current axes when [])
        mode : 'time' | anything else
            'time' plots the inverse Fourier transform, otherwise the
            frequency response
        """
        if fig == []:
            fig = plt.gcf()
        if ax == []:
            ax = plt.gca()
        if mode == 'time':
            cir = self.ift(ffts=1)
            y = cir.y
            x = cir.x
        else:
            y = self.y
            x = self.x
        # mean and variance of |y| over the measurement axis
        my = np.mean(np.abs(y), axis=0)
        yc = np.abs(y) - my[None, ...]   # centered magnitude
        yc2 = np.abs(yc) ** 2            # squared deviation
        vary = np.mean(yc2, axis=0)      # variance ; shape (Nr, Nt, Nf)
        cpt = 0
        for r in range(self.Nr):
            for t in range(self.Nt):
                #cpt=cpt+1
                #ax = plt.subplot(self.Nr,self.Nt,cpt)
                #l1, = ax.plot(self.x,np.sqrt(vary[r,t,:]),color='k',linewidth=1,alpha=1)
                #l1, = ax.plot(self.x,np.sqrt(vary[r,t,:]),linewidth=1,alpha=1)
                #l2, = ax.plot(self.x,my[r,t,:],color='r',linewidth=1,alpha=1)
                l2, = ax.plot(x, my[r, t, :], linewidth=1, alpha=1)
                ticksx = ax.axes.get_xticklabels()
                ticksy = ax.axes.get_yticklabels()
                plt.setp(ticksx, visible=True)
                plt.setp(ticksy, visible=True)
                if (r == 0) & (t == 1):
                    #l1, = ax.plot(self.x,np.sqrt(vary[r,t,:]),color='k',label='sd',linewidth=1,alpha=1)
                    l2, = ax.plot(x, np.abs(my[r, t, :]), color='r', label='mean', linewidth=1, alpha=1)
                if (r == 3) & (t == 0):
                    plt.setp(ticksx, visible=True)
                    ax.axes.set_xticks(np.arange(x[0], x[-1], 0.2))
                    plt.setp(ticksy, visible=True)
                if (r == 0) & (t == 3):
                    plt.title(r'Evolution of the mean and the standard deviation of $\mathbf{H}(f)$', fontsize=12)
                if (r == 1) & (t == 0):
                    ax.axes.set_ylabel('Amplitude (linear scale $\in [0,1]$)', fontsize=15)
                if (r == 3) & (t == 3):
                    ax.axes.set_xlabel('Frequency (GHz)', fontsize=15)
class Tchannel(bs.FUsignal):
""" Handle the transmission channel
The transmission channel TChannel is obtained through combination of the propagation
channel and the antenna transfer functions from both transmitter and receiver.
This channel contains all the spatial information for each individual ray.
Warning : This is a frequency domain channel deriving from bs.FUsignal
Attributes
----------
ray transfer functions (nray,nfreq)
dod :
direction of depature (rad) [theta_t,phi_t] nray x 2
doa :
direction of arrival (rad) [theta_r,phi_r] nray x 2
tau :
delay ray k in ns
Methods
-------
imshow()
apply(W)
applywavB(Wgam)
applywavC(Wgam)
chantap(fcGHz,WGHz,Ntap)
doddoa()
wavefig(w,Nray)
rayfig(w,Nray)
rssi(ufreq)
See Also
--------
pylayers.antprop.Ctilde.prop2tran
"""
    def __init__(self,
                 x = np.arange(0,2,1),
                 y = np.arange(0,2,1),
                 tau = np.array(([],)),
                 dod = np.array(([[],[]])).T,
                 doa = np.array(([[],[]])).T,
                 label = ''):
        """ class constructor

        Parameters
        ----------
        x : np.ndarray (nfreq,)
            frequency GHz
        y : np.ndarray (nray x nfreq)
            path amplitude
        tau : np.ndarray (1 x nray)
            path delay (ns)
        dod : np.ndarray (nray x 2)
            direction of departure
        doa : np.ndarray (nray x 2)
            direction of arrival
        label : string
        """
        # direct delays ; excess delays start at zero
        self.taud = tau
        self.taue = np.zeros(len(tau))
        # FUDsignal.__init__(self, x, y,taud)
        self.dod = dod
        self.doa = doa
        # , Nf
        # Nd x Nf x Np x Nu
        self.label = label
        # processing state flags (updated by the windowing / calibration
        # / Friis methods of the class)
        self.win = 'rect'
        self.isFriis = False
        self.windowed = False
        self.calibrated = False
        # default calibration file name
        self.filcal="calibration.mat"
        bs.FUsignal.__init__(self,x=x,y=y,label='Channel')
def __repr__(self):
st = 'Tchannel : Ray transfer function (Nray x Nr x Nt x Nf)\n'
st = st+'-----------------------------------------------------\n'
st = st + 'freq : '+str(self.x[0])+' '+str(self.x[-1])+' '+str(len(self.x))+"\n"
st = st + 'shape : '+str(np.shape(self.y))+"\n"
st = st + 'tau (min, max) : '+str(min(self.taud))+' '+str(max(self.taud))+"\n"
st = st + 'dist (min,max) : '+str(min(0.3*self.taud))+' '+str(max(0.3*self.taud))+"\n"
if self.isFriis:
st = st + 'Friis factor -j c/(4 pi f) has been applied'
if self.calibrated:
st = st+'\n calibrated : Yes\n'
else:
st = st+'\n calibrated : No\n'
if self.windowed:
st = st+' windowed : Yes\n'
st = st+self.win+'\n'
else:
st = st+' windowed : No\n'
return(st)
return(st)
    def saveh5(self,Lfilename,idx,a,b,Ta,Tb):
        """ save Ctilde object in hdf5 format

        Parameters
        ----------
        Lfilename : string
            Layout filename
        idx : int
            file identifier number
        a : np.ndarray
            postion of point a (transmitter)
        b : np.ndarray
            postion of point b (receiver)
        Ta : np.ndarray
            rotation matrice of antenna a
        Tb : np.ndarray
            rotation matrice of antenna b

        Raises
        ------
        NameError
            on any failure while writing the h5py file (the file is
            closed first so it is not corrupted)
        """
        # file name : <layout>_<idx 5 digits>.h5 under the project H dir
        _Lfilename=Lfilename.split('.')[0]
        filename= _Lfilename +'_' + str(idx).zfill(5) + '.h5'
        filenameh5=pyu.getlong(filename,pstruc['DIRH'])

        f=h5py.File(filenameh5,'w')
        # try/except to avoid loosing the h5 file if
        # read/write error
        # NOTE(review): the bare except masks the original error type
        try:
            f.attrs['a']=a
            f.attrs['b']=b
            f.attrs['Ta']=Ta
            f.attrs['Tb']=Tb
            # keys not saved as attribute of h5py file
            for k,va in self.__dict__.items():
                f.create_dataset(k,shape = np.shape(va),data=va)
            f.close()
        except:
            f.close()
            raise NameError('Channel Tchannel: issue when writting h5py file')
    def loadh5(self,Lfilename,idx, output = True):
        """ load Tchannel object in hdf5 format

        Parameters
        ----------
        Lfilename : string
            Layout filename
        idx : int
            file identifier number
        output : bool
            return an output precised in return

        Returns
        -------
        if output:
            (a,b,Ta,Tb)
            with
                a = np.ndarray
                    position of point a (transmitter)
                b = np.ndarray
                    position of point b (receiver)
                Ta = np.ndarray
                    rotation matrix of antenna a
                Tb = np.ndarray
                    rotation matrix of antenna b
        """
        filename = Lfilename.split('.')[0] +'_' + str(idx).zfill(5) + '.h5'
        filenameh5 = pyu.getlong(filename,pstruc['DIRH'])
        f=h5py.File(filenameh5, 'r')
        try:
            # keys not saved as attribute of h5py file
            for k,va in f.items():
                # if k != 'tau1':
                #     setattr(self,str(k),va[:])
                # else :
                setattr(self,str(k),va)
            a = f.attrs['a']
            b = f.attrs['b']
            Ta = f.attrs['Ta']
            Tb = f.attrs['Tb']
            f.close()
            # re-run the constructor to rebuild derived members
            self.__init__(self.x, self.y, self.taud, self.dod, self.doa)
            if output :
                return a,b,Ta,Tb
        except:
            f.close()
            raise NameError('Channel Tchannel: issue when reading h5py file')
    def _saveh5(self,filenameh5,grpname):
        """ save Tchannel object in hdf5 format compliant with Link Class

        Parameters
        ----------
        filenameh5 : str
            file name of h5py file Link format
        grpname : int
            groupname in filenameh5
        """
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            fh5=h5py.File(filename,'a')
            # create the H/<grpname> group once ; warn if it already exists
            if not grpname in fh5['H'].keys():
                fh5['H'].create_group(grpname)
            else :
                print('Warning : H/'+grpname +'already exists in '+filenameh5)
            f=fh5['H/'+grpname]
            # every member of the object becomes a dataset
            for k,va in self.__dict__.items():
                #print(k,va)
                f.create_dataset(k,shape = np.shape(va),data=va)
            fh5.close()
        except:
            fh5.close()
            raise NameError('Channel Tchannel: issue when writting h5py file')
    def _loadh5(self,filenameh5,grpname,**kwargs):
        """ Load H object in hdf5 format compliant with Link Class

        Parameters
        ----------
        filenameh5 : str
            file name of h5py file Link format
        grpname : int
            groupname in filenameh5
        """
        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        try:
            fh5=h5py.File(filename,'r')
            f = fh5['H/'+grpname]
            # keys not saved as attribute of h5py file
            for k,va in f.items():
                if k !='isFriis':
                    # materialize datasets as arrays when possible
                    try:
                        setattr(self,str(k),va[:])
                    except:
                        setattr(self,str(k),va)
                else :
                    setattr(self,str(k),va)
            fh5.close()
            # re-run the constructor to rebuild derived members
            self.__init__(self.x, self.y, self.taud, self.dod, self.doa)
        except:
            fh5.close()
            raise NameError('Channel Tchannel: issue when reading h5py file')
def apply(self, W=[]):
""" apply FUsignal W to the Tchannel
Parameters
----------
W : Bsignal.FUsignal
It exploits multigrid convolution from Bsignal.
Returns
-------
V : FUDAsignal
Notes
-----
Returns :math:`W(f) H_k(f)`
+ W may have a more important number of points and a smaller frequency band.
+ If the frequency band of the waveform exceeds the one of the
transmission channel, a warning is sent.
+ W is a FUsignal whose shape doesn't need to be homogeneous with FUChannel H
"""
if W!=[]:
U = W * self
else:
U = self
V = Tchannel(x= U.x, y = U.y, tau = self.taud, dod = self.dod, doa= self.doa)
return(V)
def applywav(self, Wgam=[]):
""" apply waveform (time domain ) to obtain the
rays impulses response
this is the 2015 vectorized method for applying
wav on Tchannel
Parameters
----------
Wgam : waveform
Returns
-------
rir : array,
impulse response for each ray separately
the size of the array is (nb_rays, support_length)
support_length is calculated in regard of the
delays of the channel
Notes
------
The overall received signal is built in time domain
Wgam is applied on each Ray Transfer function
See Also
--------
pylayers.signal.channel.rir
"""
# product in frequency domain between Channel (self) and waveform
Y = self.apply(Wgam)
# back in time domain
rir = Y.rir(Nz=500,ffts=1)
return rir
    def getcir(self,BWGHz=1,Nf=40000,fftshift=False):
        """ get the channel impulse response

        Parameters
        ----------
        BWGHz : float
            Bandwidth
        Nf : int
            Number of frequency points
        fftshift : boolean
            center the delay axis around 0 when True

        Returns
        -------
        cir : bs.TUsignal
            channel impulse response over the delay axis tauns

        See Also
        --------
        pylayers.simul.link.DLink.plt_cir
        """
        fGHz = np.linspace(0,BWGHz,Nf)
        dfGHz = fGHz[1]-fGHz[0]
        tauns = np.linspace(0,1/dfGHz,Nf)
        # E : r x nr x nt x f  (per-ray phase ramp over frequency)
        E = np.exp(-2*1j*np.pi*self.taud[:,None,None,None]*fGHz[None,None,None,:])
        # self.y : r x nr x nt x f
        if self.y.shape[3]==E.shape[3]:
            H = np.sum(E*self.y,axis=0)
        else:
            if self.y.shape[3]==1:
                # frequency-flat amplitudes broadcast over f
                H = np.sum(E*self.y,axis=0)
            else:
                # grid mismatch : fall back on the first frequency sample
                H = np.sum(E*self.y[:,:,:,0][:,:,:,None],axis=0)
        # back in time - last axis is frequency (axis=2)
        cir = np.fft.ifft(H,axis=2)
        if fftshift:
            cir = np.fft.fftshift(cir,axes=2)
            tauns = np.linspace(-Nf/(2*BWGHz),Nf/(2*BWGHz)-1/BWGHz,Nf)
        cir = bs.TUsignal(x=tauns,y=cir)
        return(cir)
def get_cir(self,Wgam=[]):
""" get Channel impulse response of the channel
for a given waveform
Parameters
----------
Wgam : waveform
Returns
-------
ri : TUsignal
impulse response for each ray separately
See Also
--------
pylayers.antprop.channel.rir
"""
rir = self.applywav(Wgam)
cir = np.sum(rir.y,axis=0)
return bs.TUsignal(rir.x, cir)
    def applywavC(self, w, dxw):
        """ apply waveform method C

        DEPRECATED : use Tchannel.applywav instead.

        Parameters
        ----------
        w :
            waveform
        dxw :
            waveform sampling step

        Notes
        -----
        The overall received signal is built in time domain.
        w is applied on the overall CIR.
        NOTE(review): `interp1d` and `arange` come from module-level
        (star) imports, and `self.H` is not set anywhere in the visible
        code of this class — confirm before reviving this method.
        """
        print(DeprecationWarning(
            'WARNING : Tchannel.applywavC is going to be replaced by Tchannel.applywav'))
        H = self.H
        h = H.ft1(500, 1)
        dxh = h.dx()
        # resample whichever signal has the coarser step
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                f = interp1d(w.x, w.y)
                x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
                y_new = f(x_new)
                w = bs.TUsignal(x_new, y_new)
            else:
                # reinterpolate h
                f = interp1d(h.x, h.y)
                x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
                y_new = f(x_new)
                h = bs.TUsignal(x_new, y_new)
        ri = h.convolve(w)
        return(ri)
def baseband(self,**kwargs):
""" Channel transfer function in baseband
Parameters
----------
fcGHz : center frequency
WMHz : bandwidth in MHz
Nf : Number of frequency points
"""
defaults = {'fcGHz':4.5,
'WMHz':20,
'Nf':100}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fcGHz = kwargs['fcGHz']
WMHz = kwargs['WMHz']
Nf = kwargs['Nf']
# self.y : Nray x Nr x Nt x Nf
# self.taud : (,Nray)
# complex amplitude in baseband
# Nray x Nr x Nt x Nf1
abb = self.y*np.exp(-2 * 1j * np.pi *self.taud[:,None,None,None] * fcGHz )
fMHz = np.linspace(-WMHz/2.,WMHz/2,Nf)
E = np.exp(-2*1j*fMHz[None,None,None,:]*1e-3*self.taud[:,None,None,None])
y = np.sum(abb*E,axis=0)
H = bs.FUsignal(x=fMHz,y=y)
return(H)
def chantap(self,**kwargs):
""" channel tap
Parameters
----------
fcGHz : center frequency
WGHz : bandwidth
Ntap : int
"""
defaults = {'fcGHz':4.5,
'WGHz':1,
'Ntap':100}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fcGHz=kwargs['fcGHz']
WGHz=kwargs['WGHz']
Ntap=kwargs['Ntap']
# yb : tau x f x 1
yb = self.y[:,:,None]*np.exp(-2 * 1j * np.pi *self.taud[:,None,None] * fcGHz )
# l : 1 x 1 x tap
l = np.arange(Ntap)[None,None,:]
# l : tau x 1 x 1
tau = self.tau0[:,None,None]
# S : tau x f x tap
S = np.sinc(l-tau*WGHz)
# htap : f x tap
htap = np.sum(yb*S,axis=0)
htapi = np.sum(htap,axis=0)
return htapi
def applywavB(self, Wgam):
""" apply waveform method B (time domain )
DEPRECATED
Parameters
----------
Wgam : waveform
Returns
-------
ri : TUDsignal
impulse response for each ray separately
Notes
------
The overall received signal is built in time domain
Wgam is applied on each Ray Transfer function
See Also
--------
pylayers.signal.bsignal.TUDsignal.ft1
"""
print(DeprecationWarning(
'WARNING : Tchannel.applywavB is going to be replaced by Tchannel.applywav'))
# product in frequency domain between Channel (self) and waveform
Y = self.apply(Wgam)
# back in time domain
ri = Y.ft1(Nz=500,ffts=1)
return(ri)
    def applywavA(self, Wgam, Tw):
        """ apply waveform method A

        DEPRECATED : use Tchannel.applywav instead.

        Parameters
        ----------
        Wgam :
            waveform in frequency domain
        Tw :
            time translation applied to the result

        Notes
        -----
        The overall received signal is built in frequency domain.
        NOTE(review): `self.H` is not set anywhere in the visible code
        of this class — confirm the attribute exists when this is called.

        See Also
        --------
        pylayers.signal.bsignal
        """
        print(DeprecationWarning(
            'WARNING : Tchannel.applywavA is going to be replaced by Tchannel.applywav'))
        Hab = self.H.ft2(0.001)
        HabW = Hab * Wgam
        # hermitian symmetrization before going back to time domain
        RI = HabW.symHz(10000)
        ri = RI.ifft(0,'natural')
        ri.translate(-Tw)
        return(ri)
    def plotd (self, d='doa', **kwargs):
        """ plot direction of arrival or direction of departure

        Parameters
        ----------
        d: 'doa' | 'dod'
            display direction of departure | arrival
        fig : plt.figure
        ax : plt.axis
        phi: tuple (-180, 180)
            phi angle
        normalize: bool
            energy normalized
        reverse : bool
            inverse theta and phi representation
        polar : bool
            polar representation
        cmap: matplotlib.cmap
        mode: 'center' | 'mean' | 'in'
            see bsignal.energy
        s : float
            scatter dot size
        fontsize: float
        edgecolors: bool
        colorbar: bool
        title : bool

        Returns
        -------
        fig, ax : matplotlib figure and axis
        """
        defaults = {
                    'fig': [],
                    'ax': [],
                    'phi':(-180, 180),
                    'normalize':False,
                    'reverse' : True,
                    'cmap':plt.cm.hot_r,
                    'mode':'center',
                    's':30,
                    'fontsize':12,
                    'edgecolors':'none',
                    'b3d':False,
                    'polar':False,
                    'colorbar':False,
                    'title':False,
                    'xa':[],
                    'xb':[]
                    }
        # fill missing kwargs with the defaults
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        # NOTE(review): if d is neither 'doa' nor 'dod' getattr falls
        # back to the *string* 'doa', not the attribute — confirm intended.
        di = getattr(self, d, 'doa')
        # remove non plt.scatter kwargs
        phi = kwargs.pop('phi')
        # b3d = kwargs.pop('b3d')
        the = (0,180)
        fontsize = kwargs.pop('fontsize')
        polar = kwargs.pop('polar')
        fig = kwargs.pop('fig')
        ax = kwargs.pop('ax')
        colorbar = kwargs.pop('colorbar')
        reverse = kwargs.pop('reverse')
        normalize = kwargs.pop('normalize')
        mode =kwargs.pop('mode')
        title =kwargs.pop('title')
        xa = kwargs.pop('xa')
        xb = kwargs.pop('xb')
        if fig == []:
            fig = plt.figure()
        # per-ray energy ; 1e-15 avoids log10(0) below
        Etot = self.energy(mode=mode) + 1e-15
        if normalize:
            Emax = max(Etot)
            Etot = Etot / Emax
        #
        # col  = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
        # WARNING polar plot require radian angles
        #
        if polar :
            al = 1.
            alb = 180. / np.pi
            phi=np.array(phi)
            the=np.array(the)
            # convert the axis limits of the plotted coordinate to radians
            if reverse :
                phi[0] = phi[0]*np.pi/180
                phi[1] = phi[1]*np.pi/180
                the[0] = the[0]
                the[1] = the[1]
            else :
                phi[0] = phi[0]
                phi[1] = phi[1]
                the[0] = the[0]*np.pi/180
                the[1] = the[1]*np.pi/180
        else :
            al = 180. / np.pi
            alb = 180. / np.pi
        # color encodes the path energy in dB
        col = 10 * np.log10(Etot)
        kwargs['c'] = col
        if len(col) != len(di):
            print("len(col):", len(col))
            print("len(di):", len(di))
        if ax == []:
            ax = fig.add_subplot(111, polar=polar)
        if reverse :
            # phi on the x axis, theta on the y axis
            scat = ax.scatter(di[:, 1] * al, di[:, 0] * alb, **kwargs)
            ax.axis((phi[0], phi[1], the[0], the[1]))
            ax.set_xlabel('$\phi(^{\circ})$', fontsize=fontsize)
            ax.set_ylabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
        else:
            scat = ax.scatter(di[:, 0] * al, di[:, 1] * alb, **kwargs)
            ax.axis((the[0], the[1], phi[0], phi[1]))
            ax.set_xlabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
            ax.set_ylabel('$\phi(^{\circ})$', fontsize=fontsize)
        if title:
            ax.set_title(d, fontsize=fontsize+2)
        if colorbar:
            b = plt.colorbar(scat,cax=ax)
            if normalize:
                b.set_label('dB')
            else:
                b.set_label('Path Loss (dB)')
            for t in b.ax.get_yticklabels():
                t.set_fontsize(fontsize)
        return (fig, ax)
def plotad(self,a='phi', **kwargs):
"""plot angular delays
Parameters
----------
d: 'doa' | 'dod'
display direction of departure | arrival
typ : 'ns' | 'm'
display delays in nano seconds ( ns) or meter (m)
fig : plt.figure
ax : plt.axis
a : str
angle 'theta' | 'phi'
normalize: bool
energy normalized
reverse : bool
inverse theta and phi represenation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
titel : bool
'clipval': float
remove values below clipval in dB
"""
defaults = { 'fig': [],
'ax': [],
'normalize':False,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':12,
'edgecolors':'none',
'polar':False,
'colorbar':False,
'taumin':[],
'taumax':[],
'typ':'m',
'title':False,
'clipval': -2500,
'd':'doa'
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# remove non plt.scatter kwargs
fontsize = kwargs.pop('fontsize')
polar = kwargs.pop('polar')
fig = kwargs.pop('fig')
ax = kwargs.pop('ax')
colorbar = kwargs.pop('colorbar')
normalize = kwargs.pop('normalize')
mode =kwargs.pop('mode')
dmin = kwargs.pop('taumin')
dmax = kwargs.pop('taumax')
title = kwargs.pop('title')
typ = kwargs.pop('typ')
clipval = kwargs.pop('clipval')
do = kwargs.pop('d')
if fig == []:
fig = plt.figure()
if do=='doa':
di = self.doa
elif do=='dod':
di = self.dod
if a == 'theta':
ang = np.array((0,180))
else :
ang = np.array((-180,180))
delay = self.taud
if typ =='m':
delay = delay*0.3
if dmin == []:
dmin = 0.#min(delay)
if dmax == []:
dmax= max(delay)
Etot = self.energy(mode=mode) + 1e-15
if normalize:
Emax = max(Etot)
Etot = Etot / Emax
#
#
#
# col = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
# WARNING polar plot require radian angles
#
#
if polar :
al = 1.
else :
al = 180. / np.pi
col = 10 * np.log10(Etot)
cv = np.where(col >= clipval)[0]
kwargs['c'] = col[cv]
if len(col) != len(di):
print("len(col):", len(col))
print("len(di):", len(dir))
if ax == []:
ax = fig.add_subplot(111, polar=polar)
if a == 'phi':
scat = ax.scatter(di[cv, 1] * al, delay[cv], **kwargs)
ax.axis((ang[0], ang[1], dmin, dmax))
ax.set_xlabel(r"$\phi(^{\circ})$", fontsize=fontsize)
if typ == 'm' :
ax.set_ylabel("distance (m)", fontsize=fontsize-2)
else :
ax.set_ylabel(r"$\phi(^{\circ})$", fontsize=fontsize-2)
elif a == 'theta':
scat = ax.scatter(di[cv, 0] * al, delay[cv], **kwargs)
ax.axis((ang[0], ang[1], dmin,dmax))
ax.set_xlabel(r"$\\theta_t(^{\circ})$", fontsize=fontsize)
if typ == 'm' :
ax.set_ylabel("distance (m)", fontsize=fontsize-2)
else :
ax.set_ylabel(r"$\phi(^{\circ})$", fontsize=fontsize-2)
if title :
ax.set_title('DoA vs delay (ns)', fontsize=fontsize+2)
if colorbar:
b=fig.colorbar(scat)
if normalize:
b.set_label('dB')
else:
b.set_label('Path Loss (dB)')
return (fig, ax)
def doadod(self, **kwargs):
""" doadod scatter plot
Parameters
----------
phi: tuple (-180, 180)
phi angle
normalize: bool
energy normalized
reverse : bool
inverse theta and phi represenation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar bool
Summary
--------
scatter plot of the DoA-DoD channel structure
the energy is colorcoded over all couples of DoA-DoD
"""
defaults = {
'phi':(-180, 180),
'normalize':False,
'reverse' : True,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':12,
'edgecolors':'none',
'polar':False,
'mode':'mean',
'b3d':False,
'xa':0,
'xb':0
}
fig = plt.figure()
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
ax1 = fig.add_subplot(121,polar=kwargs['polar'])
ax2 = fig.add_subplot(122,polar=kwargs['polar'])
if kwargs['xa']<kwargs['xb']:
fig,ax = self.plotd(d='dod',fig=fig,ax=ax1,**kwargs)
fig,ax = self.plotd(d='doa',fig=fig,ax=ax2,**kwargs)
else:
fig,ax = self.plotd(d='doa',fig=fig,ax=ax1,**kwargs)
fig,ax = self.plotd(d='dod',fig=fig,ax=ax2,**kwargs)
return fig,ax
    def field(self):
        """ total field summed over rays.

        Returns the sum over the ray axis of y * exp(-2j tau f).

        NOTE(review): the exponent lacks the np.pi factor used by the
        analogous phase terms in getcir/baseband — confirm intentional.
        NOTE(review): relies on self.tau, while the rest of the class
        stores delays in self.taud/self.taue — verify it is set.
        """
        tau = self.tau[:,None,None,None]
        fGHz = self.x[None,None,None,:]
        E = np.exp(-2*1j*tau*fGHz)
        F = self.y*E
        return np.sum(F,axis=0)
        #f = bs.FUsignal(x=self.x,y=np.sum(F,axis=0))
        #return(f)
def energy(self,mode='mean',sumray=False):
""" calculates channel energy including antennas spatial filtering
Parameters
----------
mode : string
center | mean | integ (different manner to get the value)
Friis : boolean
apply the Frris coeff(2/(4p pi f)
sumray: boolean
ray energy cummulation indicator
"""
#
# r x f
# axis 1 : ray
# axis 1 : frequency
#
if self.isFriis:
Etot = bs.FUsignal.energy(self,axis=1,mode=mode,Friis=False)
else:
Etot = bs.FUsignal.energy(self,axis=1,mode=mode,Friis=True)
if sumray:
Etot = np.sum(Etot,axis=0)
return Etot
    def wavefig(self, w, Nray=5):
        """ display the channel response to a waveform, ray by ray

        Parameters
        ----------
        w : waveform
        Nray : int
            number of rays to be displayed
        """
        # build W in the frequency domain
        W = w.ft()
        # apply W to the channel
        Y = self.apply(W)
        # r.require('graphics')
        # r.postscript('fig.eps')
        # r('par(mfrow=c(2,2))')
        # Y.fig(Nray)
        y = Y.iftd(100, 0, 50, 0)
        y.fig(Nray)
        # r.dev_off()
        # os.system("gv fig.eps ")
        # y.fidec()
        # On the returned FUsignal :
        #   left  : signal on each ray
        #   right : the same signal, shifted
        #   bottom right : the resulting signal
    def rayfig(self, k, W, col='red'):
        """ build a figure with rays

        Parameters
        ----------
        k : ray index
        W : waveform (FUsignal)

        Notes
        -----
        W is applied on the k-th ray and the received signal is built in
        time domain.
        NOTE(review): `ceil`, `arange`, `hstack`, `zeros` rely on a
        module-level star import ; the local name `np` below shadows the
        numpy alias inside this function ; the function builds Hk/Wk but
        returns nothing — it looks unfinished.
        """
        # get the kth Ray Transfer function
        Hk = bs.FUDsignal(self.H.x, self.H.y[k,:])
        dxh = Hk.dx()
        dxw = W.dx()
        w0 = W.x[0]    # fmin W
        hk0 = Hk.x[0]   # fmin Hk
        # align hk0 with w0 (or keep hk0 slightly below w0) by
        # zero-padding Hk on the left
        if w0 < hk0:
            np = ceil((hk0 - w0) / dxh)
            hk0_new = hk0 - np * dxh
            x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
            Hk.x = hstack((x, Hk.x))
            Hk.y = hstack((zeros(np), Hk.y))
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                print(" resampling w")
                x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
                Wk = W.resample(x_new)
                dx = dxh
            else:
                # reinterpolate h
                print(" resampling h")
                x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
                Hk = Hk.resample(x_new)
                dx = dxw
                Wk = W
        # qHk.x[0]==Wk.x[0]
def rssi(self,ufreq=0) :
""" Compute RSSI value for a frequency index
Parameters
----------
ufreq : int
index in the frequency range
Returns
-------
RSSI: float
RSSI value in dB
Notes
-----
This function will be deprecated by energy function
"""
# Amplitude
Ak = self.y[:, ufreq]
# Power
Pr = np.sum(Ak*np.conj(Ak))
# Complex amplitude
akp = Ak*np.exp(-2*1j*np.pi*self.x[ufreq]*self.taud)
Prp = np.abs(np.sum(akp))**2
PrdB = 10*np.log10(Pr)
PrpdB = 10*np.log10(Prp)
return PrdB,PrpdB
    def cut(self,threshold=0.99):
        """ cut the signal at an Energy threshold level

        Parameters
        ----------
        threshold : float
            default 0.99 : fraction of the cumulative energy to keep

        Notes
        -----
        Rays are first sorted by decreasing energy, then only the
        strongest rays whose cumulative energy fraction stays below
        `threshold` are kept ; delays, angles and y are filtered
        consistently.
        """
        self.sort(typ='energy')
        E = self.eprfl()
        # cumulative normalized energy profile
        cumE = np.cumsum(E)/sum(E)
        v = np.where(cumE[0,:]<threshold)[0]
        self.taud = self.taud[v]
        self.taue = self.taue[v]
        #self.tau = self.tau[v]
        self.doa = self.doa[v,:]
        self.dod = self.dod[v,:]
        self.y = self.y[v,...]
    def sort(self,typ='tau'):
        """ sort rays of the FUD signal

        Parameters
        ----------
        typ : string
            which parameter to sort
            'tau' : by increasing total delay (default)
            'energy' : by decreasing energy

        Returns
        -------
        u : np.ndarray
            permutation applied to the ray axis

        Notes
        -----
        NOTE(review): an unrecognized `typ` leaves `u` undefined and
        raises NameError below — confirm only 'tau'/'energy' are used.
        """
        if typ == 'tau':
            u = np.argsort(self.taud+self.taue)
        if typ == 'energy':
            E = self.eprfl()
            # strongest ray first
            u = np.argsort(E,axis=0)[::-1]
            u = u[:,0,0]
        # apply the same permutation to all per-ray members
        self.taud = self.taud[u]
        self.taue = self.taue[u]
        self.doa = self.doa[u]
        self.dod = self.dod[u]
        self.y = self.y[u,...]
        return(u)
def showtap(self,**kwargs):
""" show tap
Parameters
----------
same as tap
See Also
--------
tap
"""
# f x s x m x tap
htap = self.tap(**kwargs)
# sum over time m
Et_htap = np.sqrt(np.sum(htap*np.conj(htap),axis=i-1))/Nm
# sum over s
Er_htap = np.sum(htap,axis=1)/Ns
corrtap = correlate(Er_htap[0,:,0],np.conj(Er_htap[0,:,0]))
    def tap(self,**kwargs):
        """ calculate channel tap

        Parameters
        ----------
        fcGHz : float
            center frequency
        WMHz : float
            bandwidth
        Ntap : int
            number of taps (related to bandwith)
            as the bandwith increases the potential number of taps increases
        Ns : int
            number of spatial realizations
        Nm : int
            number of time samples
            the channel is sampled along a distance of half a wavelength
        Va : velocity of link termination a
        Vb : velocity of link termination b
        theta_va : float
            theta velocity termination a (in radians)
        phi_va :
            phi velocity termination a (in radians)
        theta_vb:
            theta velocity termination b (in radians)
        phi_vb :
            phi velocity termination b (in radians)

        Returns
        -------
        htap : np.ndarray (Nf x Ns x Nm x Ntap)
            tap amplitudes
        Et_htap : np.ndarray
            time averaged tap energy
        Er_htap : np.ndarray
            spatially averaged tap response
        corrtap : np.ndarray
            autocorrelation of the spatially averaged response
            (NOTE(review): `correlate` comes from a module-level star
            import — presumably numpy's; confirm)

        Examples
        --------
        >>> from pylayers.signal.bsignal import *
        """
        defaults = {'fcGHz':4.5,
                    'WMHz':1,
                    'Ntap':3,
                    'Ns':8,
                    'Nm':10,
                    'Va':1,  #meter/s
                    'Vb':1,  #meter/s
                    'theta_va':0,
                    'phi_va':0,
                    'theta_vb':0,
                    'phi_vb':0 }
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        fcGHz=kwargs['fcGHz']
        WMHz=kwargs['WMHz']
        Ntap=kwargs['Ntap']
        Ns=kwargs['Ns']
        Nm=kwargs['Nm']
        Va = kwargs['Va']
        Vb = kwargs['Vb']
        # direction of link termination velocity vectors
        theta_va = kwargs['theta_va']
        theta_vb = kwargs['theta_vb']
        phi_va = kwargs['phi_va']
        phi_vb = kwargs['phi_vb']
        Nf = len(self.x)
        # upper bound of the discrete time axis
        mmax = 0.3*WMHz*1e6/(2*fcGHz*(Va+Vb))
        lam = 0.3/fcGHz
        lamo2 = lam/2.
        # maximum Doppler frequencies at both terminations
        fmaHz = (Va/0.3)*fcGHz
        fmbHz = (Vb/0.3)*fcGHz
        # Coherence Time
        Tca = 9/(14*np.pi*fmaHz)
        Tcb = 9/(14*np.pi*fmbHz)
        Tc = 9/(14*np.pi*(fmaHz+fmbHz))
        # DoD DoA
        theta_a = self.dod[:,0]
        phi_a = self.dod[:,1]
        theta_b = self.doa[:,0]
        phi_b = self.doa[:,1]
        # unit direction vectors, 3 x r
        ska = np.array([np.cos(theta_a)*np.cos(phi_a),np.cos(theta_a)*np.sin(phi_a),np.sin(theta_a)])
        skb = np.array([np.cos(theta_b)*np.cos(phi_b),np.cos(theta_b)*np.sin(phi_b),np.sin(theta_b)])
        # Monte Carlo for spatial realization
        # s x m x tap
        ua0 = (np.cos(theta_va)+1)/2
        va0 = phi_va/(2*np.pi)
        ub0 = (np.cos(theta_vb)+1)/2
        vb0 = phi_vb/(2*np.pi)
        # standard deviation of velocity vector orientation is inversely
        # proportional to velocity magnitude
        ua = (((1/(Va+0.1))*np.random.rand(Ns)+ua0)%1)[:,None,None]
        va = (((1/(Va+0.1))*np.random.rand(Ns)+va0)%1)[:,None,None]
        ub = (((1/(Vb+0.1))*np.random.rand(Ns)+ub0)%1)[:,None,None]
        vb = (((1/(Vb+0.1))*np.random.rand(Ns)+vb0)%1)[:,None,None]
        # uniform sampling over the sphere
        tha = np.arccos(2*va-1)
        pha = 2*np.pi*ua
        thb = np.arccos(2*vb-1)
        phb = 2*np.pi*ub
        vax = np.cos(tha)*np.cos(pha)
        vay = np.cos(tha)*np.sin(pha)
        vaz = np.sin(tha)*np.cos(pha*0)
        vaxy = np.concatenate([vax[None,None,None,...],vay[None,None,None,...]])
        va = np.concatenate([vaxy,vaz[None,None,None,...]])
        vbx = np.cos(thb)*np.cos(phb)
        vby = np.cos(thb)*np.sin(phb)
        vbz = np.sin(thb)*np.cos(phb*0)
        vbxy = np.concatenate([vbx[None,None,None,...],vby[None,None,None,...]])
        # 3 x r x f x s x m x tap
        vb = np.concatenate([vbxy,vbz[None,None,None,...]])
        # beta : r x f x s x m x tap (projection of ray direction on velocity)
        betaa = np.sum(ska[:,:,None,None,None,None]*va,axis=0)
        betab = np.sum(skb[:,:,None,None,None,None]*vb,axis=0)
        # m discrete time axis
        # r x f x s x m x tap
        m = np.linspace(0,mmax,Nm)[None,None,None,:,None]
        # r x f x s x m x tap
        l = np.arange(Ntap)[None,None,None,None,:]
        # total delay (direct + excess) : r x f x s x m x tap
        tau = self.taud[:,None,None,None,None]+ \
              self.taue[:,None,None,None,None]
        # Doppler induced delay drift at both terminations
        ba = betaa*Va*m/(0.3*WMHz*1e6)
        bb = betab*Vb*m/(0.3*WMHz*1e6)
        tau2 = tau + ba + bb
        # S : r x f x s x m x tap (form 2.34 [D. Tse])
        S = np.sinc(l-tau2*WMHz/1000.)
        # sum over r : f x s x m x tap
        htap = np.sum(S*self.y[...,None,None,None]*np.exp(-2*1j*np.pi*fcGHz*tau2),axis=0)
        # f x s x m x tap
        htap = htap.reshape(Nf,Ns,Nm,Ntap)
        # time (m) averaged magnitude per tap
        Et_htap = np.sqrt(np.sum(htap*np.conj(htap),axis=2))/Nm
        # spatial (s) average per tap
        Er_htap = np.sum(htap,axis=1)/Ns
        corrtap = correlate(Er_htap[0,:,0],np.conj(Er_htap[0,:,0]))
        return(htap,Et_htap,Er_htap,corrtap)
# def minphas(self):
# """ construct a minimal phase FUsignal
# - Evaluate slope of the phase
# - deduce delay
# - update delay of FUDSignal
# - Compensation of phase slope to obtain minimal phase
# This methods updates the excess delay taue member.
# The samplinf frequency step should be
# # Examples
# # --------
# # .. plot::
# # :include-source:
# # >>> from pylayers.signal.bsignal import *
# # >>> import numpy as np
# # >>> fGHz = np.arange(2,11,0.1)
# # >>> tau1 = np.array([1,2,3])[:,None]
# # >>> y = np.exp(-2*1j*np.pi*fGHz[None,:]*tau1)/fGHz[None,:]
# # >>> H = Tchannel(x=fGHz,y=y,tau=np.array([15,17,18]))
# # >>> f,a = H.plot(typ=['ru'],xlabels=['Frequency GHz'])
# # >>> t1 = plt.suptitle('Before minimal phase compensation')
# # >>> H.minphas()
# # >>> H.taue
# # array([ 1., 2., 3.])
# # >>> f,a = H.plot(typ=['ru'],xlabels=['Frequency GHz'])
# # >>> t2 = plt.suptitle('After minimal phase compensation')
# """
# f = self.x
# phase = np.unwrap(np.angle(self.y))
# dphi = phase[:, -1] - phase[:, 0]
# df = self.x[-1] - self.x[0]
# slope = dphi / df
# #if slope >0:
# # print 'm inphas Warning : non causal FUSignal'
# #phi0 = +1j*slope*(f[-1]+f[0]/2)
# F, S = np.meshgrid(f, slope)
# #E = exp(-1j*slope*f+phi0)
# E = np.exp(-1j * S * F)
# self.y = self.y * E
# self.taue = -slope / (2 * np.pi)
# # update total delay
# #self.tau = self.tau+self.taue
    def ifft(self):
        """ inverse Fourier Transform

        Returns
        -------
        h : TUDchannel
            time-domain channel carrying the same taud/taue delays

        Notes
        -----
        NOTE(review): `fft` here is a module brought in at file level
        (presumably numpy.fft) — confirm against the imports.

        Examples
        --------
        >>> from pylayers.simul.link import *
        >>> L = DLink(verbose=False)
        >>> aktk = L.eval(force=True)
        >>> L.H.cut()
        >>> #T1 = L.H.totime()
        >>> #f,a = T1.plot(typ='v')
        >>> #L.H.minphas()
        >>> #T2 = L.H.totime()
        >>> #f,a = T2.plot(typ='v')
        """
        y = fft.ifft(self.y)
        # total observation time = 1/df
        T = 1/(self.x[1]-self.x[0])
        x = np.linspace(0,T,len(self.x))
        h = TUDchannel(x,y,self.taud,self.taue)
        return(h)
    def totime(self, Nz=1, ffts=0):
        """ transform to TUDchannel

        Parameters
        ----------
        Nz : int
            Number of zeros for zero padding
        ffts : int
            fftshift indicator (default 0)

        Returns
        -------
        h : TUDchannel

        Examples
        --------
        >>> #from pylayers.simul.link import *
        >>> #L = DLink(verbose=False)
        >>> #aktk = L.eval()
        >>> #L.H.cut()
        >>> #T1 = L.H.totime()
        >>> #f,a = T1.plot(typ='v')
        >>> #L.H.minphas()
        >>> #T2 = L.H.totime()
        >>> #f,a = T2.plot(typ='v')

        See Also
        --------
        FUsignal.ift
        """
        Nray = len(self.taud)
        s = self.ift(Nz, ffts)
        # center the time support around 0 along the last axis
        sy_shifted = fft.fftshift(s.y,axes=-1)
        h = TUDchannel(s.x, sy_shifted, self.taud,self.taue)
        return(h)
    def iftd(self, Nz=1, tstart=-10, tstop=100, ffts=0):
        """ time pasting : inverse transform each ray and paste it at
        its own delay on a common time support

        Parameters
        ----------
        Nz : int
            Number of zeros
        tstart : float
        tstop : float
        ffts : int
            fftshift indicator

        Returns
        -------
        rf : TUsignal (1,N)

        See Also
        --------
        TUsignal.translate
        """
        # total delay per ray
        tau = self.taud+self.taue
        Nray = len(tau)
        s = self.ift(Nz, ffts)
        x = s.x
        dx = s.dx()
        x_new = np.arange(tstart, tstop, dx)
        yini = np.zeros((Nray, len(x_new)))
        rf = bs.TUsignal(x_new, yini)
        #
        # initializes a void signal
        #
        for i in range(Nray):
            r = bs.TUsignal(x_new, np.zeros(len(x_new)))
            si = bs.TUsignal(x, s.y[i, :])
            # shift ray i by its own delay before pasting
            si.translate(tau[i])
            r = r + si
            rf.y[i, :] = r.y
        return rf
def rir(self, Nz, ffts=0):
""" construct ray impulse response
Parameters
----------
Nz : number of zeros for zero padding
ffts : fftshift indicator
0 no fftshift
1 apply fftshift
Returns
-------
rir : TUsignal
See Also
--------
pylayers.signal.bsignal.
"""
tau = self.taud + self.taue
taumin = min(tau)
taumax = max(tau)
dtau = (taumax-taumin)
self.s = self.ift(Nz, ffts)
t0 = self.s.x[0]
te = self.s.x[-1]
shy = self.s.y.shape
dx = self.s.x[1]-self.s.x[0]
# Delta Tau + Npoints
N = np.ceil(dtau/dx)+shy[-1]
# convert tau in an integer offset
# taumin ray is not shifted
itau = np.floor((tau-taumin)/dx).astype(int)
U = np.ones((shy[0],shy[-1]),dtype=int)
CU = np.cumsum(U,axis=1)-1 #-1 to start @ value 0
rir = np.zeros((shy[0],N))
col1 = np.repeat(np.arange(shy[0],dtype=int),shy[-1])
col2 = (CU+itau[:,None]).ravel()
index = np.vstack((col1,col2)).T
rir[index[:,0],index[:,1]] = self.s.y.ravel()
t = np.linspace(t0+taumin,te+taumax,N)
return bs.TUsignal(x=t, y=rir)
    def ft1(self, Nz, ffts=0):
        """ construct CIR from ifft(RTF)

        Parameters
        ----------
        Nz : int
            number of zeros for zero padding
        ffts : int
            fftshift indicator
            0 no fftshift
            1 apply fftshift

        Returns
        -------
        r : TUsignal
            superposition of all delayed ray contributions

        See Also
        --------
        pylayers.signal.bsignal.
        """
        tau = self.taud + self.taue
        self.s = self.ift(Nz, ffts)
        x = self.s.x
        r = bs.TUsignal(x=x, y=np.zeros(self.s.y.shape[1:]))
        if len(tau) == 1:
            # single ray : nothing to superpose
            return(self.s)
        else:
            # shift each ray by its own delay and accumulate
            for i in range(len(tau)):
                si = bs.TUsignal(self.s.x, self.s.y[i, :])
                si.translate(tau[i])
                r = r + si
            return r
def ftau(self, Nz=0, k=0, ffts=0):
""" time superposition
Parameters
----------
Nz : number of zeros for zero padding
k : starting index
ffts = 0 no fftshift
ffts = 1 apply fftshift
Returns
-------
r : TUsignal
"""
tau = self.taud + self.taue
s = self.ift(Nz, ffts)
x = s.x
r = bs.TUsignal(x, np.zeros(len(x)))
si = bs.TUsignal(s.x, s.y[k, :])
si.translate(tau[k])
r = r + si
return r
    def plot3d(self,fig=[],ax=[]):
        """ plot the ray transfer function in 3D
        (delay x frequency x linear power)

        Parameters
        ----------
        fig : plt.figure (created when empty)
        ax : plt.axis (created when empty)

        Examples
        --------
        .. plot::
            :include-source:
            >>> from pylayers.signal.bsignal import *
            >>> import numpy as np
            >>> N = 20
            >>> fGHz = np.arange(1,3,1)
            >>> taud = np.sort(np.random.rand(N))
            >>> alpha = np.random.rand(N,len(fGHz))
            >>> #s = Tchannel(x=fGHz,y=alpha,tau=taud)
            >>> #s.plot3d()
        """
        Ntau = np.shape(self.y)[0]
        Nf = np.shape(self.y)[1]
        if fig==[]:
            fig = plt.figure()
        if ax == []:
            ax = fig.add_subplot(111, projection = '3d')
        # one vertical segment per (total delay, frequency) pair
        for k,f in enumerate(self.x):
            for i,j in zip(self.taud+self.taue,abs(self.y[:,k])):
                ax.plot([i,i],[f,f],[0,j],color= 'k')
        ax.set_xlabel('Delay (ns)')
        ax.set_xlim3d(0,max(self.taud+self.taue))
        ax.set_ylabel('Frequency (fGHz)')
        ax.set_ylim3d(self.x[0],self.x[-1])
        powermin = abs(self.y).min()
        powermax = abs(self.y).max()
        ax.set_zlabel('Power (linear)')
        ax.set_zlim3d(powermin,powermax)
def ft2(self, df=0.01):
""" build channel transfer function (frequency domain)
Parameters
----------
df : float
frequency step (default 0.01)
Notes
-----
1. get fmin and fmax
2. build a new base with frequency step df
3. Initialize a FUsignal with the new frequency base
4. build matrix tau * f (Nray x Nf)
5. buildl matrix E= exp(-2 j pi f tau)
6. resampling of FUDsignal according to f --> S
7. apply the element wise product E .* S
8. add all rays
"""
fmin = self.x[0]
fmax = self.x[-1]
tau = self.taud+self.taue
f = np.arange(fmin, fmax, df)
U = bs.FUsignal(f, np.zeros(len(f)))
TAUF = np.outer(tau, f)
E = np.exp(-2 * 1j * np.pi * TAUF)
S = self.resample(f)
ES = E * S.y
V = sum(ES, axis=0)
U.y = V
return U
def frombuf(self,S,sign=-1):
""" load a buffer from vna
Parameters
----------
S : buffer
sign : int (+1 |-1) for complex reconstruction
"""
N = len(self.x)
u = np.arange(0,N)*2
v = np.arange(0,N)*2+1
S21 = (S[u]+sign*1j*S[v]).reshape((1,N))
self.y = S21
def capacity(self,Pt,T=290,mode='blast'):
""" calculates channel Shannon capacity (no csi)
Parameters
----------
Pt : Power transmitted
T : Temperature (Kelvin)
mode : string
Returns
-------
C : Channel capacity (bit/s)
"""
kB = 1.3806488e-23
N0 = kB*T
dfGHz = self.x[1]-self.x[0]
BGHz = self.x[-1]-self.x[0]
Pb = N0*BGHz*1e9
H2 = self.y*np.conj(self.y)
snr = Pt[:,None]*H2[None,:]/Pb
c = np.log(1+snr)/np.log(2)
C = np.sum(c,axis=1)*dfGHz
SNR = np.sum(snr,axis=1)*dfGHz
return(C,SNR)
    def calibrate(self,filecal='calibration.mat',conjugate=False):
        """ calibrate data

        Parameters
        ----------
        filecal : string
            calibration file name "calibration.mat"
        conjugate : boolean
            default False

        Notes
        -----
        Toggles self.calibrated : the first call divides the data by the
        calibration transfer function, a second call multiplies it back
        (un-calibrates).
        """
        self.filecal = filecal
        Hcal = Tchannel()
        Hcal.load(filecal)
        assert (len(self.x) == len(Hcal.x)),"calibration file has not the same number of points"
        if not self.calibrated:
            # apply calibration
            if not(conjugate):
                self.y = self.y/Hcal.y
            else:
                self.y = self.y/np.conj(Hcal.y)
            self.calibrated = not self.calibrated
        else:
            # undo a previous calibration
            if not(conjugate):
                self.y = self.y*Hcal.y
            else:
                self.y = self.y*np.conj(Hcal.y)
            self.calibrated = not self.calibrated
# def pdp(self,win='hamming',calibrate=True):
# """ calculates power delay profile
# Parameters
# ----------
# win : string
# window name
# """
# self.win = win
# if calibrate and not self.calibrated:
# self.calibrate()
# if not self.windowed:
# self.window(win=win)
# # inverse Fourier transform
# pdp = self.ift(ffts=1)
# return pdp
class Ctilde(PyLayers):
""" container for the 4 components of the polarimetric ray channel
Attributes
----------
Ctt : bsignal.FUsignal
Ctp : bsignal.FUsignal
Cpt : bsignal.FUsignal
Cpp : bsignal.FUsignal
tauk : ndarray delays
tang : ndarray angles of departure
rang : ndarray angles of arrival
tangl : ndarray angles of departure (local)
rangl : ndarray angles of arrival (local)
fGHz : np.array
frequency array
nfreq : int
number of frequency point
nray : int
number of rays
Methods
-------
choose
load
mobility
doadod
show
energy
sort
prop2tran
"""
    def __init__(self):
        """ class constructor

        Builds a default single-ray polarimetric channel at 2.4 GHz
        expressed between the termination global frames.

        Notes
        -----
        transpose == False   (r,f)
        transpose == True    (f,r)

        A Ctilde object can be :

        + returned from eval method of a Rays object.
        + generated from a statistical model of the propagation channel
        """
        # by default C is expressed between the global frames
        self.islocal = False
        # by default antenna rotation matrices are identity
        self.Ta = np.eye(3)
        self.Tb = np.eye(3)
        self.fGHz = np.array([2.4])
        # a single ray
        self.nray = 1
        # identity polarimetric transfer : co-pol = 1, cross-pol = 0
        self.Ctt = bs.FUsignal(x=self.fGHz,y=np.array([[1]]))
        self.Ctp = bs.FUsignal(x=self.fGHz,y=np.array([[0]]))
        self.Cpt = bs.FUsignal(x=self.fGHz,y=np.array([[0]]))
        self.Cpp = bs.FUsignal(x=self.fGHz,y=np.array([[1]]))
        # angles of departure / arrival in the global frame
        self.tang = np.array([[np.pi/2,np.pi/2]])
        self.rang = np.array([[np.pi/2,3*np.pi/2]])
        # angles of departure / arrival in the local antenna frames
        self.tangl = np.array([[np.pi/2,np.pi/2]])
        self.rangl = np.array([[np.pi/2,3*np.pi/2]])
def __repr__(self):
s = 'Ctilde : Ray Propagation Channel Tensor (2x2xrxf)'+'\n---------\n'
if self.islocal:
s = s + 'between antennas local frames\n'
else:
s = s + 'between termination global frames\n'
s = s + 'Nray : ' + str(self.nray)+'\n'
if self.Cpp.x[0]!=self.Cpp.x[-1]:
s = s + 'Nfreq : ' + str(len(self.Cpp.x))+'\n'
s = s + 'fmin(GHz) : ' + str(self.Cpp.x[0])+'\n'
s = s + 'fmax(GHz): ' + str(self.Cpp.x[-1])+'\n'
else:
s = s + 'fGHz : ' + str(self.Cpp.x[0])+'\n'
s = s + '---1st ray 1st freq ---\n'
s = s + 'global angles (th,ph) degrees : \n'
s = s + str(np.round(self.tang[0,:]*1800/np.pi)/10.)+'\n'
s = s + str(np.round(self.rang[0,:]*1800/np.pi)/10.)+'\n'
s = s + 'local angles (th,ph) degrees : \n'
s = s + str(np.round(self.tangl[0,:]*1800/np.pi)/10.)+'\n'
s = s + str(np.round(self.rangl[0,:]*1800/np.pi)/10.)+'\n'
s = s + ' | '+ str(self.Ctt.y[0,0])+' '+str(self.Ctp.y[0,0])+' |\n'
s = s + ' | '+ str(self.Cpt.y[0,0])+' '+str(self.Cpp.y[0,0])+' |\n'
return(s)
def inforay(self,iray,ifreq=0):
""" provide information about a specific ray
"""
dray = self.tauk[iray]*0.3
draydB = 20*np.log10(1./dray)
Ctt = self.Ctt.y[iray,ifreq]
Ctp = self.Ctp.y[iray,ifreq]
Cpt = self.Cpt.y[iray,ifreq]
Cpp = self.Cpp.y[iray,ifreq]
Cttc = Ctt*dray
Ctpc = Ctp*dray
Cppc = Cpp*dray
Cptc = Cpt*dray
if self.islocal:
print("between local frames")
print("--------------------")
else:
print("between global frames")
print("--------------------")
print('distance losses',draydB)
if (np.abs(Cttc)!=0):
CttdB = 20*np.log10(np.abs(Ctt))
CttcdB = 20*np.log10(np.abs(Cttc))
else:
CttdB = -np.inf
CttcdB = -np.inf
if (np.abs(Cppc)!=0):
CppdB = 20*np.log10(np.abs(Cpp))
CppcdB = 20*np.log10(np.abs(Cppc))
else:
CppdB = -np.inf
CppcdB = -np.inf
if (np.abs(Ctpc)!=0):
CtpdB = 20*np.log10(np.abs(Ctp))
CtpcdB =20*np.log10(np.abs(Ctpc))
else:
CtpdB = -np.inf
CtpcdB = -np.inf
if (np.abs(Cptc)!=0):
CptdB = 20*np.log10(np.abs(Cpt))
CptcdB = 20*np.log10(np.abs(Cptc))
else:
CptdB = -np.inf
CptcdB = -np.inf
print('Without distance losses (Interactions only)')
print("-----------------------------------------------")
print('co-pol (tt,pp) dB :',CttcdB,CppcdB)
print('cross-pol (tt,pp) dB :',CtpcdB,CptcdB)
print('With distance losses (Interactions + distance)')
print("-----------------------------------------------")
print('co-pol (tt,pp) dB :',CttdB,CppdB)
print('cross-pol (tp,pt) dB :',CtpdB,CptdB)
def saveh5(self,Lfilename,idx,a,b):
""" save Ctilde object in hdf5 format
Parameters
----------
Lfilename : string
Layout filename
idx : int
file identifier number
a : np.ndarray
postion of point a (transmitter)
b : np.ndarray
postion of point b (receiver)
"""
Lfilename=Lfilename.split('.')[0]
_filename= Lfilename +'_' + str(idx).zfill(5) + '.hdf5'
filename=pyu.getlong(_filename,pstruc['DIRCT'])
# save channel in global basis
# new call to locbas
if self.islocal:
self.locbas()
# try/except to avoid loosing the h5 file if
# read/write error
try:
f=h5py.File(filename,'w')
f.create_dataset('Ta',shape=np.shape(self.Ta),data=self.Ta)
f.create_dataset('Tb',shape=np.shape(self.Tb),data=self.Tb)
f.create_dataset('tang',shape=np.shape(self.tang),data=self.tang)
f.create_dataset('rang',shape=np.shape(self.rang),data=self.rang)
f.create_dataset('tauk',shape=np.shape(self.tauk),data=self.tauk)
f.create_dataset('fGHz',shape=np.shape(self.fGHz),data=self.fGHz)
f.create_dataset('Ctt_y',shape=np.shape(self.Ctt.y),data=self.Ctt.y)
f.create_dataset('Cpp_y',shape=np.shape(self.Cpp.y),data=self.Cpp.y)
f.create_dataset('Cpt_y',shape=np.shape(self.Cpt.y),data=self.Cpt.y)
f.create_dataset('Ctp_y',shape=np.shape(self.Ctp.y),data=self.Ctp.y)
f.create_dataset('Tx',shape=np.shape(a),data=a)
f.create_dataset('Rx',shape=np.shape(b),data=b)
f.close()
except:
f.close()
raise NameError('Channel.Ctilde: issue when writting h5py file')
def loadh5(self,Lfilename,idx,output=True):
""" load Ctilde object in hdf5 format
Parameters
----------
Lfilename : string
Layout filename
idx : int
file identifier number
output : bool
return an output precised in return
Returns
-------
if output:
(Layout filename , Tx position, Rx position)
"""
_Lfilename=Lfilename.split('.')[0]
_filename= _Lfilename +'_' + str(idx).zfill(5) + '.hdf5'
filename=pyu.getlong(_filename,pstruc['DIRCT'])
try:
f=h5py.File(filename,'r')
self.fGHz = f['fGHz'][:]
self.tang = f['tang'][:]
self.rang = f['rang'][:]
self.tauk = f['tauk'][:]
self.Ta = f['Ta'][:]
self.Tb = f['Tb'][:]
Ctt = f['Ctt_y'][:]
Cpp = f['Cpp_y'][:]
Ctp = f['Ctp_y'][:]
Cpt = f['Cpt_y'][:]
self.Ctt = bs.FUsignal(self.fGHz, Ctt)
self.Ctp = bs.FUsignal(self.fGHz, Ctp)
self.Cpt = bs.FUsignal(self.fGHz, Cpt)
self.Cpp = bs.FUsignal(self.fGHz, Cpp)
tx = f['Tx'][:]
rx = f['Rx'][:]
self.nfreq = len(self.fGHz)
self.nray = np.shape(self.Cpp.y)[0]
f.close()
except:
f.close()
raise NameError('Channel.Ctilde: issue when reading h5py file')
if output :
return (Lfilename ,tx,rx)
def _saveh5(self,filenameh5,grpname):
""" save Ctilde object in hdf5 format compliant with Link Class
Parameters
----------
filenameh5 : str
file name of h5py file Link format
grpname : int
groupname in filenameh5
"""
# back to global frame
if self.islocal:
self.locbas()
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# try/except to avoid loosing the h5 file if
# read/write error
try:
fh5=h5py.File(filename,'a')
if not grpname in fh5['Ct'].keys():
fh5['Ct'].create_group(grpname)
else :
print('Warning : Ct/'+grpname +'already exists in '+filenameh5)
f=fh5['Ct/'+grpname]
# save channel in global basis
f.create_dataset('Ta',shape=np.shape(self.Ta),data=self.Ta)
f.create_dataset('Tb',shape=np.shape(self.Tb),data=self.Tb)
f.create_dataset('tang',shape=np.shape(self.tang),data=self.tang)
f.create_dataset('rang',shape=np.shape(self.rang),data=self.rang)
f.create_dataset('tauk',shape=np.shape(self.tauk),data=self.tauk)
f.create_dataset('fGHz',shape=np.shape(self.fGHz),data=self.fGHz)
f.create_dataset('Ctt_y',shape=np.shape(self.Ctt.y),data=self.Ctt.y)
f.create_dataset('Cpp_y',shape=np.shape(self.Cpp.y),data=self.Cpp.y)
f.create_dataset('Cpt_y',shape=np.shape(self.Cpt.y),data=self.Cpt.y)
f.create_dataset('Ctp_y',shape=np.shape(self.Ctp.y),data=self.Ctp.y)
fh5.close()
except:
fh5.close()
raise NameError('Channel.Ctilde: issue when writting h5py file')
def los(self,**kwargs):
""" Line of site channel
Parameters
----------
d(m)
fGHz (,Nf)
tang (1x2)
rang (1x2)
"""
defaults = {'pa':np.r_[197,189.8,1.65]
,'pb': np.r_[220,185,6]
,'fGHz':np.r_[32.6]
,'Ta':np.eye(3)
,'Tb':np.array([[0.28378894, -0.8972627, -0.33820628],
[-0.57674955, -0.44149706, 0.68734293],
[-0.76604425, 0., -0.64278784]])
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
self.pa = kwargs['pa']
self.pb = kwargs['pb']
self.fGHz = kwargs['fGHz']
self.Ta = kwargs['Ta']
self.Tb = kwargs['Tb']
self.nray = 1
si = self.pb-self.pa
d = np.r_[np.sqrt(np.sum(si*si))]
si = si/d
self.tauk = d/0.3
#
# ka = - kb for LOS
#
tha = np.arccos(si[2])
pha = np.arctan2(si[1],si[0])
thb = np.arccos(-si[2])
phb = np.arctan2(-si[1],-si[0])
self.tang = np.array([tha,pha]).reshape((1,2))
self.rang = np.array([thb,phb]).reshape((1,2))
U = np.ones(len(self.fGHz),dtype=complex)/d[0]
Z = np.zeros(len(self.fGHz),dtype=complex)
self.Ctt = bs.FUsignal(self.fGHz, U)
self.Ctp = bs.FUsignal(self.fGHz, Z)
self.Cpt = bs.FUsignal(self.fGHz, Z)
self.Cpp = bs.FUsignal(self.fGHz, U)
self.locbas()
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load Ctilde object in hdf5 format
Parameters
----------
filenameh5 : str
file name of h5py file Link format
grpname : int
groupname in filenameh5
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
try:
fh5=h5py.File(filename,'r')
f = fh5['Ct/'+grpname]
self.fGHz = f['fGHz'][:]
self.tang = f['tang'][:]
self.rang = f['rang'][:]
self.tauk = f['tauk'][:]
self.Ta = f['Ta'][:]
self.Tb = f['Tb'][:]
Ctt = f['Ctt_y'][:]
Cpp = f['Cpp_y'][:]
Ctp = f['Ctp_y'][:]
Cpt = f['Cpt_y'][:]
self.Ctt = bs.FUsignal(self.fGHz, Ctt)
self.Ctp = bs.FUsignal(self.fGHz, Ctp)
self.Cpt = bs.FUsignal(self.fGHz, Cpt)
self.Cpp = bs.FUsignal(self.fGHz, Cpp)
self.nfreq = len(self.fGHz)
self.nray = np.shape(self.Cpp.y)[0]
fh5.close()
except:
fh5.close()
raise NameError('Channel.Ctilde: issue when reading h5py file')
def mobility(self, v, dt):
""" modify channel for uniform mobility
Parameters
----------
v : float
velocity (m/s)
dt : float
delta t (s)
Notes
-----
Calculate a channel field from Ctilde and v(terminal vitese)
and dt(time of deplacement)
dt en s (observation time between 2 Rx position)
v en m/s (vitesse de changement de Rx)
Returns
-------
tau : modified Ctilde
"""
c = 0.3 # m/ns celerity of light
tauk = self.tauk
tang = self.tang
rang = self.rang
rk = tauk * c
rk_mod = abs(rk)
sk_ch = rk / rk_mod
# cos_alph =dot(v/abs(v),sk_ch)
cos_alph = (v * sk_ch) / abs(v)
self.cos_alph = cos_alph
rk_ch = rk_mod * cos_alph * abs(v) * dt
sk_ch_ch = (rk + v * dt) / (rk_ch + cos_alph * abs(v) * dt)
tauk_ch = (abs(rk_ch) * sk_ch_ch) / c
return(tauk_ch)
def plotd (self, d='doa', **kwargs):
""" plot direction of arrival/departure
Parameters
----------
d: string
'doa' | 'dod'
display direction of departure | arrival
fig : plt.figure
ax : plt.axis
phi: tuple (-180, 180)
phi angle
normalize: bool
energy normalized
reverse : bool
inverse theta and phi represenation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
title : bool
"""
defaults = {
'fig': [],
'ax': [],
'phi':(-180, 180),
'normalize':False,
'reverse' : True,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':12,
'edgecolors':'none',
'b3d':False,
'polar':False,
'colorbar':False,
'title' : False
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if d =='dod':
tit = 'DOD : A'
di = getattr(self, 'tang')
elif d == 'doa':
tit = 'DOA : B'
di = getattr(self, 'rang')
else :
raise AttributeError('d attribute can only be doa or dod')
# remove non plt.scatter kwargs
phi = kwargs.pop('phi')
b3d = kwargs.pop('b3d')
the = (0,180)
fontsize = kwargs.pop('fontsize')
polar = kwargs.pop('polar')
fig = kwargs.pop('fig')
ax = kwargs.pop('ax')
colorbar = kwargs.pop('colorbar')
reverse = kwargs.pop('reverse')
normalize = kwargs.pop('normalize')
mode = kwargs.pop('mode')
title = kwargs.pop('title')
if fig == []:
fig = plt.figure()
Ett, Epp, Etp, Ept = self.energy(mode=mode,Friis=True)
Etot = Ett+Epp+Etp+Ept + 1e-15
if normalize:
Emax = max(Etot)
Etot = Etot / Emax
#
#
#
# col = 1 - (10*log10(Etot)-Emin)/(Emax-Emin)
# WARNING polar plot require radian angles
if polar :
al = 1.
alb = 180. / np.pi
phi=np.array(phi)
the=np.array(the)
if reverse :
phi[0] = phi[0]*np.pi/180
phi[1] = phi[1]*np.pi/180
the[0] = the[0]
the[1] = the[1]
else :
phi[0] = phi[0]
phi[1] = phi[1]
the[0] = the[0]*np.pi/180
the[1] = the[1]*np.pi/180
else :
al = 180. / np.pi
alb = 180. / np.pi
col = 10 * np.log10(Etot)
kwargs['c'] = col
if len(col) != len(di):
print("len(col):", len(col))
print("len(di):", len(dir))
if b3d:
ax = fig.add_subplot(111,projection='3d')
ax.scatter(1.05*array(xa),1.05*array(ya),1.05*array(za),'b')
ax.scatter(1.05*array(xb),1.05*array(yb),1.05*array(zb),'r')
else:
if ax == []:
ax = fig.add_subplot(111, polar=polar)
if reverse :
scat = ax.scatter(di[:, 1] * al, di[:, 0] * alb, **kwargs)
ax.axis((phi[0], phi[1], the[0], the[1]))
ax.set_xlabel('$\phi(^{\circ})$', fontsize=fontsize)
ax.set_ylabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
else:
scat = ax.scatter(di[:, 0] * al, di[:, 1] * alb, **kwargs)
ax.axis((the[0], the[1], phi[0], phi[1]))
ax.set_xlabel("$\\theta_t(^{\circ})$", fontsize=fontsize)
ax.set_ylabel('$\phi(^{\circ})$', fontsize=fontsize)
if title:
ax.set_title(tit, fontsize=fontsize+2)
ll = ax.get_xticklabels()+ax.get_yticklabels()
for l in ll:
l.set_fontsize(fontsize)
if colorbar:
#divider = make_axes_locatable(ax)
#cax = divider.append_axes("right",size="5%",pad=0.05)
clb = plt.colorbar(scat,ax=ax)
if normalize:
clb.set_label('dB',size=fontsize)
else:
clb.set_label('Path Loss (dB)',size=fontsize)
for t in clb.ax.get_yticklabels():
t.set_fontsize(fontsize)
return (fig, ax)
def doadod(self, **kwargs):
""" doadod scatter plot
Parameters
----------
phi : tuple (-180, 180)
phi angle
normalize : bool
energy normalized
reverse : bool
inverse theta and phi representation
polar : bool
polar representation
cmap : matplotlib.cmap
mode : string
'center' | 'mean' | 'in'
s : float
scatter dot size
fontsize : float
edgecolors : bool
colorbar : bool
xa :
xb :
Summary
--------
scatter plot of the DoA-DoD channel structure
the energy is color coded over all couples of DoA-DoD
Examples
--------
>>> from pylayers.antprop.channel import *
See Also
--------
pylayers.signal.bsignal.energy
"""
defaults = {
'phi':(-180, 180),
'normalize':False,
'reverse' : True,
'cmap':plt.cm.hot_r,
'mode':'center',
's':30,
'fontsize':12,
'edgecolors':'none',
'polar':False,
'b3d':False,
'mode':'mean',
'colorbar':False,
'xa':0,
'xb':1
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
xa = kwargs.pop('xa')
xb = kwargs.pop('xb')
if 'fig' not in kwargs:
fig = plt.gcf()
kwargs['fig']=fig
else:
fig = kwargs['fig']
ax1 = fig.add_subplot(121,polar=kwargs['polar'])
ax2 = fig.add_subplot(122,polar=kwargs['polar'])
if xa<xb:
fig,ax1 = self.plotd(d='dod',ax=ax1,**kwargs)
fig,ax2 = self.plotd(d='doa',ax=ax2,**kwargs)
else:
fig,ax1 = self.plotd(d='doa',ax=ax1,**kwargs)
fig,ax2 = self.plotd(d='dod',ax=ax2,**kwargs)
return fig,[ax1,ax2]
def locbas(self,**kwargs):
""" global reference frame to local reference frame
If Tt and Tr are [] the global channel is retrieved
Parameters
----------
Ta : rotation matrix 3x3 side a
default []
Tb : rotation matrix 3x3 side b
default []
Returns
-------
This method affects the boolean islocal
This method update the ray propagation channel in either local or global frame
self.Ta and self.Tb are updated with input parameters Ta an Tb
C : ray propagation channel (2x2xrxf) complex
either local or global depends on self.islocal boolean value
Examples
--------
>>> C = Ctilde()
>>> Ta = MEulerAngle(np.pi/2,np.pi/2,np.pi/2.)
>>> Tb = MEulerAngle(np.pi/3,np.pi/3,np.pi/3.)
>>> C.locbas(Ta=Ta,Tb=Tb)
"""
# get Ctilde frequency axes
fGHz = self.fGHz
# if rotation matrices are passed in argument
# back to global if local
if ('Ta' in kwargs) & ('Tb' in kwargs):
if self.islocal:
self.locbas()
self.islocal=False
self.Tb = kwargs['Tb']
self.Ta = kwargs['Ta']
# angular axes
#
# tang : r x 2
# rang : r x 2
#
# Ra : 2 x 2 x r
# Rb : 2 x 2 x r
#
# tangl : r x 2
# rangl : r x 2
#
tangl,Ra = geu.BTB(self.tang, self.Ta)
rangl,Rb = geu.BTB(self.rang, self.Tb)
if self.islocal:
Ra = Ra.transpose((1,0,2))
self.islocal=False
else:
Rb = Rb.transpose((1,0,2))
self.islocal=True
#
# update direction of departure and arrival
#
self.tangl = tangl
self.rangl = rangl
#uf = np.ones(self.nfreq)
#
# r0 : r x 1(f)
#
#r0 = rb00
r0 = Rb[0,0,:][:, None]
#r1 = rb01
r1 = Rb[0,1,:][:, None]
t00 = r0 * self.Ctt.y + r1 * self.Cpt.y
t01 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = rb10
r0 = Rb[1, 0,:][:, None]
#r1 = rb11
r1 = Rb[1, 1,:][:, None]
t10 = r0 * self.Ctt.y + r1 * self.Cpt.y
t11 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = ra00
r0 = Ra[0, 0, :][:, None]
#r1 = ra10
r1 = Ra[1, 0, :][:, None]
Cttl = t00 * r0 + t01 * r1
Cptl = t10 * r0 + t11 * r1
#r0 = ra01
r0 = Ra[0, 1, :][:, None]
#r1 = ra11
r1 = Ra[1, 1, :][:, None]
Ctpl = t00 * r0 + t01 * r1
Cppl = t10 * r0 + t11 * r1
self.Ctt = bs.FUsignal(fGHz, Cttl)
self.Ctp = bs.FUsignal(fGHz, Ctpl)
self.Cpt = bs.FUsignal(fGHz, Cptl)
self.Cpp = bs.FUsignal(fGHz, Cppl)
#return self
def Cg2Cl(self, Tt=[], Tr=[]):
""" global reference frame to local reference frame
If Tt and Tr are [] the global channel is retrieved
Parameters
----------
Tt : Tx rotation matrix 3x3
default []
Tr : Rx rotation matrix 3x3
default []
Returns
-------
Cl : Ctilde local
Examples
--------
"""
# get frequency axes
fGHz = self.fGHz
if (Tt !=[]) & (Tr!=[]):
self.Ta = Tt
self.Tb = Tr
else:
if (hasattr(self,'Ta')) & (hasattr(self, 'Tb')):
self.Ta = self.Ta.transpose()
self.Tb = self.Tb.transpose()
else:
return
# get angular axes
# Rt (2x2)
# Rr (2x2)
#
# tang : r x 2
# rang : r x 2
#
# Rt : 2 x 2 x r
# Rr : 2 x 2 x r
#
# tangl : r x 2
# rangl : r x 2
#
tangl , Ra = geu.BTB(self.tang, self.Ta)
rangl , Rb = geu.BTB(self.rang, self.Tb)
Rb = Rb.transpose((1,0,2))
#
# update direction of departure and arrival
#
self.tang = tangl
self.rang = rangl
#uf = np.ones(self.nfreq)
#
# r0 : r x 1(f)
#
#r0 = np.outer(Rr[0, 0,:], uf)
r0 = Rr[0,0,:][:,None]
#r1 = np.outer(Rr[0, 1,:], uf)
r1 = Rr[0,1,:][:,None]
t00 = r0 * self.Ctt.y + r1 * self.Cpt.y
t01 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = np.outer(Rr[1, 0,:], uf)
r0 = Rr[1, 0,:][:,None]
#r1 = np.outer(Rr[1, 1,:], uf)
r1 = Rr[1, 1,:][:,None]
t10 = r0 * self.Ctt.y + r1 * self.Cpt.y
t11 = r0 * self.Ctp.y + r1 * self.Cpp.y
#r0 = np.outer(Rt[0, 0,:], uf)
r0 = Rt[0,0,:][:,None]
#r1 = np.outer(Rt[1, 0,:], uf)
r1 = Rt[1,0,:][:,None]
Cttl = t00 * r0 + t01 * r1
Cptl = t10 * r0 + t11 * r1
#r0 = np.outer(Rt[0, 1,:], uf)
r0 = Rt[0,1,:][:,None]
#r1 = np.outer(Rt[1, 1,:], uf)
r1 = Rt[1,1,:][:,None]
Ctpl = t00 * r0 + t01 * r1
Cppl = t10 * r0 + t11 * r1
self.Ctt = bs.FUsignal(fGHz, Cttl)
self.Ctp = bs.FUsignal(fGHz, Ctpl)
self.Cpt = bs.FUsignal(fGHz, Cptl)
self.Cpp = bs.FUsignal(fGHz, Cppl)
return self
def show(self, **kwargs):
""" show the propagation channel
Parameters
----------
typ : 'm', 'l20' , 'r'
cmap : colormap
default hot
fontsize : int
default 14
"""
defaults = {'typ': 'm',
'cmap': plt.cm.hot,
'fontsize':14}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if 'fig' not in kwargs:
kwargs['fig'] = plt.figure()
ax1 = kwargs['fig'].add_subplot(221)
fig, ax1 = self.Ctt.imshow(ax=ax1,**kwargs)
ax1.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
ax1.set_title(u'$C_{\\theta\\theta}$',fontsize=kwargs['fontsize'])
ax2 = kwargs['fig'].add_subplot(222)
fig, ax2 = self.Ctp.imshow(ax=ax2,**kwargs)
ax2.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
ax2.set_title(u'$C_{\\theta\phi}$',fontsize=kwargs['fontsize'])
ax3 = kwargs['fig'].add_subplot(223)
fig, ax3 = self.Cpt.imshow(ax=ax3,**kwargs)
ax3.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
ax3.set_title(u'$C_{\phi\\theta}$',fontsize=kwargs['fontsize'])
ax4 = kwargs['fig'].add_subplot(224)
fig, ax4 = self.Cpp.imshow(ax=ax4,**kwargs)
ax4.set_xlabel('Frequency (GHz)',fontsize=kwargs['fontsize'])
ax4.set_title(u'$C_{\phi\phi}$',fontsize=kwargs['fontsize'])
return fig, (ax1, ax2, ax3, ax4)
def check_reciprocity(self, C):
""" check channel reciprocity
Parameters
----------
C : Ctilde
Notes
-----
This is not properly implemented
"""
issue=[]
assert np.allclose(self.tauk, C.tauk)
for r in range(self.nray):
if not np.allclose(self.Ctt.y[r,:], C.Ctt.y[r,:]):
issue.append(r)
if len(issue) == 0:
print("Channel is reciprocal")
else:
print("WARNING Reciprocity issue WARNING")
print(len(issue),'/',self.nray, 'rays are not reciprocal,')
print("rays number with an issue :",issue)
# assert np.allclose(self.tang,C.rang)
# assert np.allclose(self.rang,C.tang)
def energy(self,mode='mean',Friis=True,sumray=False):
""" calculates energy on each channel
Parameters
----------
mode : string
'mean'
Friis: boolean
True
sumray: boolean
False
Returns
-------
ECtt : Energy on co channel tt
ECpp : Energy on co channel pp
ECtp : Energy on co channel tp
ECpt : Energy on co channel pt
See Also
--------
pylayers.signal.bsignal.FUsignal.energy
Notes
-----
r x f+
axis 0 : ray
axis 1 : frequency
"""
#
# r x f
# axis 0 : ray
# axis 1 : frequency
#
ECtt = self.Ctt.energy(axis=1,Friis=Friis,mode=mode)
ECtp = self.Ctp.energy(axis=1,Friis=Friis,mode=mode)
ECpt = self.Cpt.energy(axis=1,Friis=Friis,mode=mode)
ECpp = self.Cpp.energy(axis=1,Friis=Friis,mode=mode)
if sumray:
ECtt = np.sum(ECtt,axis=0)
ECtp = np.sum(ECtp,axis=0)
ECpt = np.sum(ECpt,axis=0)
ECpp = np.sum(ECpp,axis=0)
return ECtt, ECpp, ECtp, ECpt
def cut(self,threshold_dB=50):
""" cut rays from a energy threshold
Parameters
----------
threshold : float
default 0.99
"""
Ett, Epp, Etp, Ept = self.energy()
Etot = Ett+Epp+Etp+Ept
u = np.argsort(Etot)[::-1]
#cumE = np.cumsum(Etot[u])/sum(Etot)
profdB = 10*np.log10(Etot[u]/np.max(Etot))
#v1 = np.where(cumE<threshold)[0]
v = np.where(profdB>-threshold_dB)[0]
w = u[v]
self.selected = w
self.Eselected = Etot[w]
self.tauk = self.tauk[w]
self.tang = self.tang[w,:]
self.rang = self.rang[w,:]
self.Ctt.y = self.Ctt.y[w,:]
self.Cpp.y = self.Cpp.y[w,:]
self.Ctp.y = self.Ctp.y[w,:]
self.Cpt.y = self.Cpt.y[w,:]
def sort(self,typ='tauk'):
""" sort Ctilde with respect to typ (default tauk)
Parameters
----------
typ : string
sort w.r.t
'tauk' : delay (default)
'att' : theta Tx
'atp' : phi Tx
'art' : theta Rx
'arp' : phi Rx
'energy' : energy
"""
if typ == 'tauk':
u = np.argsort(self.tauk)
if typ == 'att':
u = np.argsort(self.tang[:, 0])
if typ == 'atp':
u = np.argsort(self.tang[:, 1])
if typ == 'art':
u = np.argsort(self.rang[:, 0])
if typ == 'arp':
u = np.argsort(self.rang[:, 1])
if typ == 'energy':
Ett, Epp, Etp, Ept = self.energy()
Etot = Ett+Epp+Etp+Ept
u = np.argsort(Etot)
self.tauk = self.tauk[u]
self.tang = self.tang[u,:]
self.rang = self.rang[u,:]
self.Ctt.y = self.Ctt.y[u,:]
self.Cpp.y = self.Cpp.y[u,:]
self.Ctp.y = self.Ctp.y[u,:]
self.Cpt.y = self.Cpt.y[u,:]
def prop2tran(self,a=[],b=[],Friis=True,debug=False):
r""" transform propagation channel into transmission channel
Parameters
----------
a : antenna or array a
b : antenna or array b
Ta : np.array(3x3)
unitary matrice for antenna orientation
Tb : np.array(3x3)
unitary matrice for antenna orientation
Friis : boolean
if True scale with :math:`-j\frac{\lambda}{f}`
debug : boolean
if True the antenna gain for each ray is stored
Returns
-------
H : Tchannel(bs.FUsignal)
"""
freq = self.fGHz
nfreq = self.nfreq
nray = self.nray
sh = np.shape(self.Ctt.y)
# select default antennas
# omni polar theta 't' <=> vertical polarization
#
if a ==[]:
a = ant.Antenna('Omni',param={'pol':'t','GmaxdB':0},fGHz=self.fGHz)
if b ==[]:
b = ant.Antenna('Omni',param={'pol':'t','GmaxdB':0},fGHz=self.fGHz)
a.eval(th = self.tangl[:, 0], ph = self.tangl[:, 1])
Fat = bs.FUsignal(a.fGHz, a.Ft)
Fap = bs.FUsignal(a.fGHz, a.Fp)
#b.eval(th=self.rangl[:, 0], ph=self.rangl[:, 1], grid=False)
b.eval(th = self.rangl[:, 0], ph = self.rangl[:, 1])
Fbt = bs.FUsignal(b.fGHz, b.Ft)
Fbp = bs.FUsignal(b.fGHz, b.Fp)
#
# C : 2 x 2 x r x f
#
# Ctt : r x f (complex FUsignal)
# Cpp : r x f (complex FUsignal)
# Ctp : r x f (complex FUsignal)
# Cpt : r x f (complex FUsignal)
#
# a.Ft = r x (Na) x f (complex ndarray)
# a.Fp = r x (Na) x f (complex ndarray)
# b.Ft = r x (Nb) x f (complex ndarray)
# b.Fp = r x (Nb) x f (complex ndarray)
#
# (r x f ) (r x Nt x f )
#
# This exploit * overloading in FUsignal
t1 = self.Ctt * Fat + self.Ctp * Fap
t2 = self.Cpt * Fat + self.Cpp * Fap
# depending on SISO or MIMO case
# the shape of the received fields T1 and T2
#
# In MIMO case
# a.Ft.y.shape == (r x Na x f)
# a.Fp.y.shape == (r x Na x f)
# In SISO case
# a.Ft.y.shape == (r x f)
# a.Fp.y.shape == (r x f)
#
if len(t1.y.shape)==3:
T1 = t1.y[:,None,:,:]
T2 = t2.y[:,None,:,:]
else:
T1 = t1.y[:,None,None,:]
T2 = t2.y[:,None,None,:]
if len(Fbt.y.shape)==3:
FBt = Fbt.y[:,:,None,:]
FBp = Fbp.y[:,:,None,:]
else:
FBt = Fbt.y[:,None,None,:]
FBp = Fbp.y[:,None,None,:]
# determine the common interval on frequency axis
if np.sum(t1.x!=Fbt.x)>0:
t1x_int = (np.round(t1.x*100)).astype(int)
Fbtx_int = (np.round(Fbt.x*100)).astype(int)
inter = np.intersect1d(t1x_int,Fbtx_int)
ut = np.in1d(t1x_int,inter)
uf = np.in1d(Fbtx_int,inter)
else:
ut = np.arange(len(t1.x))
uf = np.arange(len(Fbt.x))
assert(len(t1.x[ut])==len(Fbt.x[uf])),"problem in common index plage calculation"
alpha1 = np.einsum('ljkm,lkim->ljim',FBt[...,uf],T1[...,ut])
alpha2 = np.einsum('ljkm,lkim->ljim',FBp[...,uf],T2[...,ut])
#alpha = t1 * Fbt + t2 * Fbp
# Nd x Nr x Nt x Nf
alpha = alpha1 + alpha2
self.fGHz = t1.x[ut]
H = Tchannel(x = self.fGHz,
y = alpha,
tau = self.tauk,
dod = self.tang,
doa = self.rang)
if debug :
H.alpha=alpha
H.Fat=Fat.y
H.Fap=Fap.y
H.Fbt=Fbt.y
H.Fbp=Fbp.y
H.Gat=10*np.log10(np.sum(Fat.y*np.conj(Fat.y),axis=1)/len(Fat.x))
H.Gap=10*np.log10(np.sum(Fap.y*np.conj(Fap.y),axis=1)/len(Fap.x))
H.Gbt=10*np.log10(np.sum(Fbt.y*np.conj(Fbt.y),axis=1)/len(Fbt.x))
H.Gbp=10*np.log10(np.sum(Fbp.y*np.conj(Fbp.y),axis=1)/len(Fbp.x))
if Friis:
H.applyFriis()
return H
if __name__ == "__main__":
plt.ion()
doctest.testmod()
| mit |
IssamLaradji/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 13 | 7459 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
plt.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| bsd-3-clause |
pyro-ppl/numpyro | examples/prodlda.py | 1 | 12097 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: ProdLDA
================
In this example, we will follow [1] to implement the ProdLDA topic model from
Autoencoding Variational Inference For Topic Models by Akash Srivastava and Charles
Sutton [2]. This model returns consistently better topics than vanilla LDA and trains
much more quickly. Furthermore, it does not require a custom inference algorithm that
relies on complex mathematical derivations. This example also serves as an
introduction to Flax and Haiku modules in NumPyro.
Note that unlike [1, 2], this implementation uses a Dirichlet prior directly rather
than approximating it with a softmax-normal distribution.
For the interested reader, a nice extension of this model is the CombinedTM model [3]
which utilizes a pre-trained sentence transformer (like https://www.sbert.net/) to
generate a better representation of the encoded latent vector.
**References:**
1. http://pyro.ai/examples/prodlda.html
2. Akash Srivastava, & Charles Sutton. (2017). Autoencoding Variational Inference
For Topic Models.
3. Federico Bianchi, Silvia Terragni, and Dirk Hovy (2021), "Pre-training is a Hot
Topic: Contextualized Document Embeddings Improve Topic Coherence"
(https://arxiv.org/abs/2004.03974)
.. image:: ../_static/img/examples/prodlda.png
:align: center
"""
import argparse
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import flax.linen as nn
import haiku as hk
import jax
from jax import device_put, random
import jax.numpy as jnp
import numpyro
from numpyro.contrib.module import flax_module, haiku_module
import numpyro.distributions as dist
from numpyro.infer import SVI, TraceMeanField_ELBO
class HaikuEncoder:
    """Haiku encoder: maps a bag-of-words count vector to the concentration
    parameters of the Dirichlet over topic proportions."""

    def __init__(self, vocab_size, num_topics, hidden, dropout_rate):
        self._vocab_size = vocab_size
        self._num_topics = num_topics
        self._hidden = hidden
        self._dropout_rate = dropout_rate

    def __call__(self, inputs, is_training):
        # disable dropout entirely at evaluation time
        dropout_rate = self._dropout_rate if is_training else 0.0

        h = jax.nn.softplus(hk.Linear(self._hidden)(inputs))
        h = jax.nn.softplus(hk.Linear(self._hidden)(h))
        h = hk.dropout(hk.next_rng_key(), dropout_rate, h)
        h = hk.Linear(self._num_topics)(h)

        # NB: here we set `create_scale=False` and `create_offset=False` to reduce
        # the number of learning parameters
        log_concentration = hk.BatchNorm(
            create_scale=False, create_offset=False, decay_rate=0.9
        )(h, is_training)
        # exponentiate so the concentration parameters are strictly positive
        return jnp.exp(log_concentration)
class HaikuDecoder:
    """Haiku decoder: maps topic proportions back to per-word logits."""

    def __init__(self, vocab_size, dropout_rate):
        self._vocab_size = vocab_size
        self._dropout_rate = dropout_rate

    def __call__(self, inputs, is_training):
        # disable dropout entirely at evaluation time
        dropout_rate = self._dropout_rate if is_training else 0.0
        h = hk.dropout(hk.next_rng_key(), dropout_rate, inputs)
        h = hk.Linear(self._vocab_size, with_bias=False)(h)
        # scale/offset disabled to keep the parameter count down (see encoder)
        return hk.BatchNorm(create_scale=False, create_offset=False, decay_rate=0.9)(
            h, is_training
        )
class FlaxEncoder(nn.Module):
    """Flax encoder: maps a bag-of-words count vector to the concentration
    parameters of the Dirichlet over topic proportions."""

    vocab_size: int
    num_topics: int
    hidden: int
    dropout_rate: float

    @nn.compact
    def __call__(self, inputs, is_training):
        h = nn.softplus(nn.Dense(self.hidden)(inputs))
        h = nn.softplus(nn.Dense(self.hidden)(h))
        h = nn.Dropout(self.dropout_rate, deterministic=not is_training)(h)
        h = nn.Dense(self.num_topics)(h)

        # bias/scale disabled to reduce the number of learnable parameters
        log_concentration = nn.BatchNorm(
            use_bias=False,
            use_scale=False,
            momentum=0.9,
            use_running_average=not is_training,
        )(h)
        # exponentiate so the concentration parameters are strictly positive
        return jnp.exp(log_concentration)
class FlaxDecoder(nn.Module):
    """Flax decoder: maps topic proportions back to per-word logits."""

    vocab_size: int
    dropout_rate: float

    @nn.compact
    def __call__(self, inputs, is_training):
        h = nn.Dropout(self.dropout_rate, deterministic=not is_training)(inputs)
        h = nn.Dense(self.vocab_size, use_bias=False)(h)
        # bias/scale disabled to reduce the number of learnable parameters
        return nn.BatchNorm(
            use_bias=False,
            use_scale=False,
            momentum=0.9,
            use_running_average=not is_training,
        )(h)
def model(docs, hyperparams, is_training=False, nn_framework="flax"):
    """ProdLDA generative model.

    For each (subsampled) document: draw topic proportions theta from a
    uniform Dirichlet, decode theta into word logits with a neural decoder,
    and observe the word counts under a Multinomial likelihood.
    """
    if nn_framework == "flax":
        decoder = flax_module(
            "decoder",
            FlaxDecoder(hyperparams["vocab_size"], hyperparams["dropout_rate"]),
            input_shape=(1, hyperparams["num_topics"]),
            # ensure PRNGKey is made available to dropout layers
            apply_rng=["dropout"],
            # indicate mutable state due to BatchNorm layers
            mutable=["batch_stats"],
            # to ensure proper initialisation of BatchNorm we must
            # initialise with is_training=True
            is_training=True,
        )
    elif nn_framework == "haiku":
        decoder = haiku_module(
            "decoder",
            # use `transform_with_state` for BatchNorm
            hk.transform_with_state(
                HaikuDecoder(hyperparams["vocab_size"], hyperparams["dropout_rate"])
            ),
            input_shape=(1, hyperparams["num_topics"]),
            apply_rng=True,
            # to ensure proper initialisation of BatchNorm we must
            # initialise with is_training=True
            is_training=True,
        )
    else:
        raise ValueError(f"Invalid choice {nn_framework} for argument nn_framework")

    with numpyro.plate(
        "documents", docs.shape[0], subsample_size=hyperparams["batch_size"]
    ):
        batch_docs = numpyro.subsample(docs, event_dim=1)
        theta = numpyro.sample(
            "theta", dist.Dirichlet(jnp.ones(hyperparams["num_topics"]))
        )

        if nn_framework == "flax":
            logits = decoder(theta, is_training, rngs={"dropout": numpyro.prng_key()})
        elif nn_framework == "haiku":
            logits = decoder(numpyro.prng_key(), theta, is_training)

        # each document's words are a Multinomial draw over the vocabulary
        total_count = batch_docs.sum(-1)
        numpyro.sample(
            "obs", dist.Multinomial(total_count, logits=logits), obs=batch_docs
        )
def guide(docs, hyperparams, is_training=False, nn_framework="flax"):
    """Variational guide (amortized inference network).

    Encodes each (subsampled) document's word counts into Dirichlet
    concentration parameters and samples the topic proportions theta.
    """
    if nn_framework == "flax":
        encoder = flax_module(
            "encoder",
            FlaxEncoder(
                hyperparams["vocab_size"],
                hyperparams["num_topics"],
                hyperparams["hidden"],
                hyperparams["dropout_rate"],
            ),
            input_shape=(1, hyperparams["vocab_size"]),
            # ensure PRNGKey is made available to dropout layers
            apply_rng=["dropout"],
            # indicate mutable state due to BatchNorm layers
            mutable=["batch_stats"],
            # to ensure proper initialisation of BatchNorm we must
            # initialise with is_training=True
            is_training=True,
        )
    elif nn_framework == "haiku":
        encoder = haiku_module(
            "encoder",
            # use `transform_with_state` for BatchNorm
            hk.transform_with_state(
                HaikuEncoder(
                    hyperparams["vocab_size"],
                    hyperparams["num_topics"],
                    hyperparams["hidden"],
                    hyperparams["dropout_rate"],
                )
            ),
            input_shape=(1, hyperparams["vocab_size"]),
            apply_rng=True,
            # to ensure proper initialisation of BatchNorm we must
            # initialise with is_training=True
            is_training=True,
        )
    else:
        raise ValueError(f"Invalid choice {nn_framework} for argument nn_framework")

    with numpyro.plate(
        "documents", docs.shape[0], subsample_size=hyperparams["batch_size"]
    ):
        batch_docs = numpyro.subsample(docs, event_dim=1)

        if nn_framework == "flax":
            concentration = encoder(
                batch_docs, is_training, rngs={"dropout": numpyro.prng_key()}
            )
        elif nn_framework == "haiku":
            concentration = encoder(numpyro.prng_key(), batch_docs, is_training)

        numpyro.sample("theta", dist.Dirichlet(concentration))
def load_data():
    """Load the 20 newsgroups corpus as a document-term count matrix.

    Returns
    -------
    docs : jnp.ndarray of shape (n_documents, vocab_size)
        Raw token counts per document.
    vocab : pd.DataFrame with columns ``word`` and ``index``
        Mapping between vocabulary indices and words.
    """
    news = fetch_20newsgroups(subset="all")
    vectorizer = CountVectorizer(max_df=0.5, min_df=20, stop_words="english")
    docs = jnp.array(vectorizer.fit_transform(news["data"]).toarray())

    vocab = pd.DataFrame(columns=["word", "index"])
    # `get_feature_names` was deprecated in scikit-learn 1.0 and removed in
    # 1.2 in favour of `get_feature_names_out`; support both for portability.
    if hasattr(vectorizer, "get_feature_names_out"):
        vocab["word"] = vectorizer.get_feature_names_out()
    else:
        vocab["word"] = vectorizer.get_feature_names()
    vocab["index"] = vocab.index

    return docs, vocab
def run_inference(docs, args):
    """Run stochastic variational inference and return the SVI result
    (contains the learned parameters and the loss trace)."""
    rng_key = random.PRNGKey(0)
    docs = device_put(docs)

    hyperparams = dict(
        vocab_size=docs.shape[1],
        num_topics=args.num_topics,
        hidden=args.hidden,
        dropout_rate=args.dropout_rate,
        batch_size=args.batch_size,
    )

    optimizer = numpyro.optim.Adam(args.learning_rate)
    # TraceMeanField_ELBO gives a lower-variance estimate than Trace_ELBO
    # when the guide's sites are conditionally independent, as here
    svi = SVI(model, guide, optimizer, loss=TraceMeanField_ELBO())

    return svi.run(
        rng_key,
        args.num_steps,
        docs,
        hyperparams,
        is_training=True,
        progress_bar=not args.disable_progbar,
        nn_framework=args.nn_framework,
    )
def plot_word_cloud(b, ax, vocab, n):
    """Draw a word cloud of topic *n*'s top-20 words onto axis *ax*.

    b : per-word weight vector for one topic
    vocab : DataFrame mapping vocabulary indices to words
    """
    # indices of words sorted by weight, descending
    indices = jnp.argsort(b)[::-1]
    top20 = indices[:20]
    df = pd.DataFrame(top20, columns=["index"])
    words = pd.merge(df, vocab[["index", "word"]], how="left", on="index")[
        "word"
    ].values.tolist()
    sizes = b[top20].tolist()
    freqs = {words[i]: sizes[i] for i in range(len(words))}
    wc = WordCloud(background_color="white", width=800, height=500)
    wc = wc.generate_from_frequencies(freqs)
    ax.set_title(f"Topic {n + 1}")
    ax.imshow(wc, interpolation="bilinear")
    ax.axis("off")
def main(args):
    """Load data, fit the ProdLDA model, and save per-topic word clouds."""
    docs, vocab = load_data()
    print(f"Dictionary size: {len(vocab)}")
    print(f"Corpus size: {docs.shape}")

    svi_result = run_inference(docs, args)

    # extract the learned topic-word weight matrix (beta) from the decoder;
    # the parameter path depends on the chosen NN framework
    if args.nn_framework == "flax":
        beta = svi_result.params["decoder$params"]["Dense_0"]["kernel"]
    elif args.nn_framework == "haiku":
        beta = svi_result.params["decoder$params"]["linear"]["w"]

    # normalise each topic's weights into a distribution over words
    beta = jax.nn.softmax(beta)

    # the number of plots depends on the chosen number of topics.
    # add 2 to num topics to ensure we create a row for any remainder after division
    nrows = (args.num_topics + 2) // 3
    fig, axs = plt.subplots(nrows, 3, figsize=(14, 3 + 3 * nrows))
    axs = axs.flatten()

    for n in range(beta.shape[0]):
        plot_word_cloud(beta[n], axs[n], vocab, n)

    # hide any unused axes.
    # BUGFIX: the original iterated range(n, len(axs)), which re-hid the last
    # *used* axis and raised NameError when beta was empty; start after the
    # last plotted topic instead.
    for i in range(beta.shape[0], len(axs)):
        axs[i].axis("off")

    fig.savefig("wordclouds.png")
if __name__ == "__main__":
    # this example was written against numpyro 0.6.0; fail fast otherwise
    assert numpyro.__version__.startswith("0.6.0")
    parser = argparse.ArgumentParser(
        description="Probabilistic topic modelling with Flax and Haiku"
    )
    parser.add_argument("-n", "--num-steps", nargs="?", default=30_000, type=int)
    parser.add_argument("-t", "--num-topics", nargs="?", default=12, type=int)
    parser.add_argument("--batch-size", nargs="?", default=32, type=int)
    parser.add_argument("--learning-rate", nargs="?", default=1e-3, type=float)
    parser.add_argument("--hidden", nargs="?", default=200, type=int)
    parser.add_argument("--dropout-rate", nargs="?", default=0.2, type=float)
    parser.add_argument(
        "-dp",
        "--disable-progbar",
        action="store_true",
        default=False,
        help="Whether to disable progress bar",
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help='use "cpu", "gpu" or "tpu".'
    )
    parser.add_argument(
        "--nn-framework",
        nargs="?",
        default="flax",
        help=(
            "The framework to use for constructing encoder / decoder. Options are "
            '"flax" or "haiku".'
        ),
    )
    args = parser.parse_args()

    # select the JAX backend before any computation happens
    numpyro.set_platform(args.device)
    main(args)
| apache-2.0 |
ryokbys/nap | nappy/fitpot/analyze_samples.py | 1 | 7436 | #!/usr/bin/env python
"""
Analyze energies and forces (stresses?) of specified samples.
The sample directory name should start with 'smpl_' and
the name follows before the second '_' is used for coloring.
Usage:
analyze_samples.py [options] DIRS [DIRS...]
Options:
-h, --help Show this message and exit.
--graph-format FORMAT
Specify a graph format. [default: png]
--energy-limit ELIM
Extract sample names whose energy difference from the minimum amoung data
is larger than ELIM. [default: none]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
import glob
from nappy.napsys import NAPSystem
__author__ = "RYO KOBAYASHI"
__version__ = "180224"
_graph_name = 'graph.erg-vs-vol.'
def read_erg(fname='erg.ref'):
    """Read the reference energy: the first value on the first line of FNAME."""
    with open(fname, 'r') as infile:
        first_line = infile.readline()
    return float(first_line.split()[0])
def read_frc(fname='frc.ref'):
    """Read reference forces from FNAME.

    The first line holds the number of atoms; each following line holds
    one force vector (floats) per atom.
    """
    with open(fname, 'r') as infile:
        natm = int(infile.readline().split()[0])
        return [[float(v) for v in infile.readline().split()]
                for _ in range(natm)]
def read_strs(fname='strs.ref'):
    """Read the reference stress components from the first line of FNAME."""
    with open(fname, 'r') as infile:
        line = infile.readline()
    return [float(v) for v in line.split()]
def read_sample(dirname):
    """
    Read system, energy, forces and stress information of given DIRNAME.

    Returns (nsys, erg, frcs, strs); raises RuntimeError if any of the
    required files is missing.
    """
    #...The directory must have erg.ref, frc.ref, strs.ref, and pos files.
    files = ('pos','erg.ref','frc.ref','strs.ref')
    for f in files:
        if not os.path.exists(dirname+'/'+f):
            raise RuntimeError('The file '+f+' does not exist in '+dirname)
    #...Read pos first
    nsys = NAPSystem(fname=dirname+'/pos',format='pmd')
    erg = read_erg(fname=dirname+'/erg.ref')
    frcs = read_frc(fname=dirname+'/frc.ref')
    strs = read_strs(fname=dirname+'/strs.ref')
    return nsys,erg,frcs,strs
def statistics(systems):
    """Print average/std/min/max/range of per-atom energies and of force
    components over all given samples.

    Parameters
    ----------
    systems : list of dict
        Each dict must provide 'nsys' (object with num_atoms()),
        'erg' (total energy) and 'frcs' (natm x 3 force components).
    """
    # Guard: the averages below divide by len(systems) and by the number of
    # force components, which would raise ZeroDivisionError on empty input.
    if not systems:
        print('No samples to analyze.')
        return
    e_ave = 0.0
    e_var = 0.0
    e_min = 1e+30
    e_max = -1e+30
    f_ave = 0.0
    f_var = 0.0
    f_min = 1e+30
    f_max = -1e+30
    nf = 0
    for s in systems:
        nsys = s['nsys']
        natm = nsys.num_atoms()
        erg = s['erg']/natm
        frcs = s['frcs']
        #...energy
        e_ave += erg
        e_var += erg*erg
        e_min = min(e_min,erg)
        e_max = max(e_max,erg)
        #...forces
        for i in range(natm):
            for j in range(3):
                nf += 1
                f_ave += frcs[i][j]
                f_var += frcs[i][j]*frcs[i][j]
                f_min = min(f_min,abs(frcs[i][j]))
                f_max = max(f_max,abs(frcs[i][j]))
    e_ave /= len(systems)
    # population variance via E[x^2] - E[x]^2
    e_var = e_var/len(systems) -e_ave**2
    f_ave /= nf
    f_var = f_var/nf -f_ave**2
    print('Energy per atom:')
    print('  Average:            {0:8.4f}'.format(e_ave))
    print('  Standard deviation: {0:8.4f}'.format(np.sqrt(e_var)))
    print('  Minimum:            {0:8.4f}'.format(e_min))
    print('  Maximum:            {0:8.4f}'.format(e_max))
    print('  Energy range:       {0:8.4f}'.format(e_max-e_min))
    print('Force component:')
    print('  Average:            {0:8.4f}'.format(f_ave))
    print('  Standard deviation: {0:8.4f}'.format(np.sqrt(f_var)))
    print('  Minimum:            {0:8.4f}'.format(f_min))
    print('  Maximum:            {0:8.4f}'.format(f_max))
    print('  Force range:        {0:8.4f}'.format(f_max-f_min))
    return
def angle2color(angle):
    """
    Convert angle in degree [0:360] (float) to color in RGB.
    """
    from matplotlib import colors
    # fixed saturation/value give readable, slightly muted colors
    hue = angle / 360
    return colors.hsv_to_rgb((hue, 1.0, 0.8))
def uniq(arr):
    """Return the distinct elements of ARR, preserving first-seen order."""
    seen = []
    for item in arr:
        if item not in seen:
            seen.append(item)
    return seen
def draw_graph(systems,uniq_names,graph_format='png',
               graph_name='graph.png'):
    """
    Draw an energy-vs-volume (per atom) scatter plot of the given systems,
    one color per sample name, and save it to GRAPH_NAME.
    """
    try:
        import matplotlib.pyplot as plt
    except Exception:
        raise ImportError('Cannot import module matplotlib.pyplot')
    try:
        import seaborn as sns
        sns.set(context='poster',style='darkgrid')
    except Exception:
        # seaborn is optional; fall back to default matplotlib styling
        pass

    cmap = plt.get_cmap('tab10')
    # shrink the markers when there are many samples to reduce overplotting
    markersize = 10
    if len(systems) > 2000:
        markersize = 5
    num_name = len(uniq_names)
    dangle = 360.0 /num_name
    for i,name in enumerate(uniq_names):
        ergs = []
        vols = []
        #angle = i*dangle
        #color = angle2color(angle)
        color = cmap(i)
        for s in systems:
            if s['name'] != name:
                continue
            nsys = s['nsys']
            natm = nsys.num_atoms()
            erg = s['erg'] /natm
            ergs.append(erg)
            vols.append(nsys.volume()/natm)
        plt.plot(vols,ergs,'o',color=color,mec='black',mew=0.5,
                 ms=markersize,label=name)
    plt.xlabel('Volume (Ang^3/atom)')
    plt.ylabel('Energy (eV/atom)')
    plt.legend(loc='best')
    plt.savefig(graph_name, format=graph_format,
                dpi=300, bbox_inches='tight')
    return
def get_high_energy_samples(systems,elim=1.0):
    """Return dnames of samples whose per-atom energy exceeds the minimum
    per-atom energy among SYSTEMS by more than ELIM.
    """
    # BUGFIX: emin was initialized to 0.0, which silently capped the
    # "minimum" at zero whenever all sample energies are positive.
    emin = 1e+30
    for s in systems:
        nsys = s['nsys']
        natm = nsys.num_atoms()
        erg = s['erg']/natm
        emin = min(emin,erg)
    print('Minimum energy = ',emin)
    dnames = []
    for s in systems:
        nsys = s['nsys']
        natm = nsys.num_atoms()
        erg = s['erg']/natm
        if np.abs(erg-emin) > elim:
            dnames.append(s['dname'])
    print('Num of samples over ELIM = ',len(dnames))
    return dnames
def arrange_dirs(dirs):
    """Normalize the list of sample directory paths.

    Trailing '/' characters are stripped (in place, mutating DIRS);
    entries whose basename lacks 'smpl_' are expanded into the 'smpl_*'
    directories found inside them.
    """
    # strip a trailing '/' from each entry, in place
    for i, d in enumerate(dirs):
        if d[-1] == '/':
            dirs[i] = d[:-1]
    arranged = []
    for d in dirs:
        if 'smpl_' in os.path.basename(d):
            arranged.append(d)
        else:
            # not a sample dir itself -- collect the sample dirs inside it
            arranged.extend(glob.glob(d + '/smpl_*'))
    return arranged
if __name__ == "__main__":

    args = docopt(__doc__)
    dirs = args['DIRS']
    graph_format = args['--graph-format']

    dirs = arrange_dirs(dirs)
    print('Number of dirs = {0:d}'.format(len(dirs)))

    systems = []
    uniq_names = []
    for d in dirs:
        # sample name = token between the 1st and 2nd '_' of the basename;
        # used for grouping and coloring in the graph
        name = d.split('/')[-1].split('_')[1]
        try:
            nsys,erg,frcs,strs = read_sample(d)
            if name not in uniq_names:
                uniq_names.append(name)
        except:
            # NOTE(review): bare except silently skips unreadable samples;
            # consider catching specific exceptions and logging the dir name
            continue
        s = {}
        s['nsys'] = nsys
        s['erg'] = erg
        s['frcs'] = frcs
        s['strs'] = strs
        s['name'] = name
        s['dname'] = d
        systems.append(s)

    statistics(systems)
    graph_name = 'graph.erg-vs-vol.'+graph_format
    draw_graph(systems,uniq_names,graph_format=graph_format,
               graph_name=graph_name)
    print('')
    print('- '+graph_name)

    # optionally dump the names of samples far above the minimum energy
    elim = args['--energy-limit']
    if elim != 'none':
        print('')
        elim = float(elim)
        dnames = get_high_energy_samples(systems,elim)
        with open('out.high_energy_samples','w') as f:
            for d in dnames:
                f.write('{0:s}\n'.format(d))
        print('- out.high_energy_samples')
YeoLab/gscripts | gscripts/rnaseq/splicing_modality.py | 1 | 3374 | __author__ = 'Olga'
import pymc as pm
import pandas as pd
from collections import Counter
def _assign_modality_from_estimate(mean_alpha, mean_beta):
"""
Given estimated alpha and beta parameters from an Markov Chain Monte Carlo
run, assign a modality.
"""
# check if one parameter is much larger than another, and that they're
# both larger than 1
if mean_alpha / mean_beta > 2 or mean_beta / mean_alpha > 2:
if mean_alpha > mean_beta:
return 'included'
else:
return 'excluded'
else:
if mean_alpha < .9 and mean_beta < .9:
return 'bimodal'
elif mean_alpha > 2 and mean_beta > 2:
return 'middle'
elif abs((mean_alpha + mean_beta) / 2 - 1) < 0.5:
return 'uniform'
else:
return None
def _print_and_plot(mean_alpha, mean_beta, alphas, betas, n_iter, data):
print
print mean_alpha, mean_beta, ' estimated modality:', \
_assign_modality_from_estimate(mean_alpha, mean_beta)
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
import prettyplotlib as ppl
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
ax = axes[0]
ppl.plot(alphas, label='alpha', ax=ax)
ppl.plot(betas, label='beta', ax=ax)
ppl.legend(ax=ax)
ax.hlines(mean_alpha, 0, n_iter)
ax.hlines(mean_beta, 0, n_iter)
ax.annotate('mean_alpha = {:.5f}'.format(mean_alpha),
(0, mean_alpha), fontsize=12,
xytext=(0, 1), textcoords='offset points')
ax.annotate('mean_beta = {:.5f}'.format(mean_alpha),
(0, mean_beta), fontsize=12,
xytext=(0, 1), textcoords='offset points')
ax.set_xlim(0, n_iter)
ax = axes[1]
ppl.hist(data, facecolor='grey', alpha=0.5, bins=np.arange(0, 1, 0.05),
zorder=10, ax=ax)
ymin, ymax = ax.get_ylim()
one_x = np.arange(0, 1.01, 0.01)
x = np.repeat(one_x, n_iter).reshape(len(one_x), n_iter)
beta_distributions = np.vstack((beta(a, b).pdf(one_x)
for a, b in zip(alphas, betas))).T
ppl.plot(x, beta_distributions, color=ppl.colors.set2[0], alpha=0.1,
linewidth=2, ax=ax)
ax.set_ylim(0, ymax)
def _fit_beta_distribution(data, n_iter):
    """Fit a Beta(alpha, beta) distribution to *data* by MCMC (pymc2) and
    return the sampled traces of alpha and beta."""
    # Exponential(0.5) priors keep both shape parameters positive
    alpha_var = pm.Exponential('alpha', .5)
    beta_var = pm.Exponential('beta', .5)
    observations = pm.Beta('observations', alpha_var, beta_var, value=data,
                           observed=True)

    model = pm.Model([alpha_var, beta_var, observations])
    mcmc = pm.MCMC(model)
    mcmc.sample(n_iter)

    alphas = mcmc.trace('alpha')[:]
    betas = mcmc.trace('beta')[:]
    return alphas, betas
def estimate_modality(data, n_iter=1000, plot=False):
    """Fit a Beta distribution to *data* via MCMC and classify its modality.

    Returns a pandas Series with the posterior-mean 'mean_alpha' and
    'mean_beta' and the assigned 'modality' label.
    """
    #if plot:
    #    print data.name
    #    print data
    alphas, betas = _fit_beta_distribution(data, n_iter)
    mean_alpha = alphas.mean()
    mean_beta = betas.mean()
    estimated_modality = _assign_modality_from_estimate(mean_alpha, mean_beta)
    # report how often each modality is assigned across individual samples
    print Counter(_assign_modality_from_estimate(a, b)
                  for a, b in zip(alphas, betas))
    if plot:
        _print_and_plot(mean_alpha, mean_beta, alphas, betas, n_iter, data)
    return pd.Series({'mean_alpha': mean_alpha, 'mean_beta': mean_beta,
                      'modality': estimated_modality})
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/sparse/array.py | 5 | 16980 | """
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
from numpy import nan, ndarray
import numpy as np
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas import compat, lib
from pandas.compat import range
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
import pandas.index as _index
import pandas.core.ops as ops
def _arith_method(op, name, str_rep=None, default_axis=None,
                  fill_zeros=None, **eval_kwargs):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Returns a bound method implementing *op* for SparseArray, handling
    ndarray operands (aligned sparse op) and scalars (applied to both the
    stored values and the fill_value).
    """
    def wrapper(self, other):
        if isinstance(other, np.ndarray):
            if len(self) != len(other):
                raise AssertionError("length mismatch: %d vs. %d" %
                                     (len(self), len(other)))
            if not isinstance(other, com.ABCSparseArray):
                other = SparseArray(other, fill_value=self.fill_value)
            # reflected ops ('radd', ...) swap the operands
            if name[0] == 'r':
                return _sparse_array_op(other, self, op, name[1:])
            else:
                return _sparse_array_op(self, other, op, name)
        elif np.isscalar(other):
            # a scalar op transforms the fill_value the same way as the values
            new_fill_value = op(np.float64(self.fill_value),
                                np.float64(other))
            return SparseArray(op(self.sp_values, other),
                               sparse_index=self.sp_index,
                               fill_value=new_fill_value)
        else:  # pragma: no cover
            raise TypeError('operation with %s not supported' % type(other))

    # strip dunder decoration ('__add__' -> 'add') for the wrapper's name
    if name.startswith("__"):
        name = name[2:-2]
    wrapper.__name__ = name
    return wrapper
def _sparse_array_op(left, right, op, name):
    """Apply binary *op* to two SparseArrays, aligning their sparse indices,
    and return a new SparseArray."""
    # choose the NaN-fill or explicit-fill C kernel based on left's fill_value
    if np.isnan(left.fill_value):
        sparse_op = lambda a, b: _sparse_nanop(a, b, name)
    else:
        sparse_op = lambda a, b: _sparse_fillop(a, b, name)

    if left.sp_index.equals(right.sp_index):
        # identical layouts: operate on the stored values directly
        result = op(left.sp_values, right.sp_values)
        result_index = left.sp_index
    else:
        result, result_index = sparse_op(left, right)

    try:
        fill_value = op(left.fill_value, right.fill_value)
    except:
        # e.g. the op is undefined for the fill values; fall back to NaN
        fill_value = nan
    return SparseArray(result, sparse_index=result_index,
                       fill_value=fill_value)
def _sparse_nanop(this, other, name):
    """Dispatch to the NaN-fill sparse kernel ``sparse_nan<name>`` in
    pandas._sparse; returns (result values, result sparse index)."""
    sparse_op = getattr(splib, 'sparse_nan%s' % name)
    result, result_index = sparse_op(this.sp_values,
                                     this.sp_index,
                                     other.sp_values,
                                     other.sp_index)
    return result, result_index
def _sparse_fillop(this, other, name):
    """Dispatch to the explicit-fill sparse kernel ``sparse_<name>`` in
    pandas._sparse, passing both operands' fill values; returns
    (result values, result sparse index)."""
    sparse_op = getattr(splib, 'sparse_%s' % name)
    result, result_index = sparse_op(this.sp_values,
                                     this.sp_index,
                                     this.fill_value,
                                     other.sp_values,
                                     other.sp_index,
                                     other.fill_value)
    return result, result_index
class SparseArray(PandasObject, np.ndarray):
    """Data structure for labeled, sparse floating point data

    Parameters
    ----------
    data : {array-like, Series, SparseSeries, dict}
    kind : {'block', 'integer'}
    fill_value : float
        Defaults to NaN (code for missing)
    sparse_index : {BlockIndex, IntIndex}, optional
        Only if you have one. Mainly used internally

    Notes
    -----
    SparseArray objects are immutable via the typical Python means. If you
    must change values, convert to dense, make your changes, then convert back
    to sparse
    """
    __array_priority__ = 15
    _typ = 'array'
    _subtyp = 'sparse_array'

    # class-level defaults; instances get real values in __new__
    sp_index = None
    fill_value = None

    def __new__(
            cls, data, sparse_index=None, index=None, kind='integer', fill_value=None,
            dtype=np.float64, copy=False):
        """Construct a SparseArray; ndarray subclasses allocate in __new__."""
        if index is not None:
            # scalar broadcast against an index
            if data is None:
                data = np.nan
            if not np.isscalar(data):
                raise Exception("must only pass scalars with an index ")
            values = np.empty(len(index), dtype='float64')
            values.fill(data)
            data = values

        if dtype is not None:
            dtype = np.dtype(dtype)
        is_sparse_array = isinstance(data, SparseArray)
        if fill_value is None:
            if is_sparse_array:
                fill_value = data.fill_value
            else:
                fill_value = nan

        if is_sparse_array:
            sparse_index = data.sp_index
            values = np.asarray(data)
        else:
            # array-like
            if sparse_index is None:
                values, sparse_index = make_sparse(data, kind=kind,
                                                   fill_value=fill_value)
            else:
                values = data
                if len(values) != sparse_index.npoints:
                    raise AssertionError("Non array-like type {0} must have"
                                         " the same length as the"
                                         " index".format(type(values)))

        # Create array, do *not* copy data by default
        if copy:
            subarr = np.array(values, dtype=dtype, copy=True)
        else:
            subarr = np.asarray(values, dtype=dtype)

        # if we have a bool type, make sure that we have a bool fill_value
        if (dtype is not None and issubclass(dtype.type, np.bool_)) or (data is not None and lib.is_bool_array(subarr)):
            if np.isnan(fill_value) or not fill_value:
                fill_value = False
            else:
                fill_value = bool(fill_value)

        # Change the class of the array to be the subclass type.
        output = subarr.view(cls)
        output.sp_index = sparse_index
        output.fill_value = fill_value
        return output

    @property
    def _constructor(self):
        # construct a like-configured SparseArray from dense values
        return lambda x: SparseArray(x, fill_value=self.fill_value,
                                     kind=self.kind)

    @property
    def kind(self):
        """Sparse layout kind: 'block' or 'integer'."""
        if isinstance(self.sp_index, BlockIndex):
            return 'block'
        elif isinstance(self.sp_index, IntIndex):
            return 'integer'

    def __array_finalize__(self, obj):
        """
        Gets called after any ufunc or other array operations, necessary
        to pass on the index.
        """
        self.sp_index = getattr(obj, 'sp_index', None)
        self.fill_value = getattr(obj, 'fill_value', None)

    def __reduce__(self):
        """Necessary for making this object picklable"""
        object_state = list(ndarray.__reduce__(self))
        subclass_state = self.fill_value, self.sp_index
        object_state[2] = (object_state[2], subclass_state)
        return tuple(object_state)

    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        nd_state, own_state = state
        ndarray.__setstate__(self, nd_state)

        fill_value, sp_index = own_state[:2]
        self.sp_index = sp_index
        self.fill_value = fill_value

    def __len__(self):
        # logical (dense) length, not the number of stored values
        try:
            return self.sp_index.length
        except:
            return 0

    def __unicode__(self):
        return '%s\nFill: %s\n%s' % (com.pprint_thing(self),
                                     com.pprint_thing(self.fill_value),
                                     com.pprint_thing(self.sp_index))

    def disable(self, other):
        # SparseArray is immutable; all in-place binary ops are disabled
        raise NotImplementedError('inplace binary ops not supported')
    # Inplace operators
    __iadd__ = disable
    __isub__ = disable
    __imul__ = disable
    __itruediv__ = disable
    __ifloordiv__ = disable
    __ipow__ = disable

    # Python 2 division operators
    if not compat.PY3:
        __idiv__ = disable

    @property
    def values(self):
        """
        Dense values
        """
        output = np.empty(len(self), dtype=np.float64)
        int_index = self.sp_index.to_int_index()
        output.fill(self.fill_value)
        output.put(int_index.indices, self)
        return output

    @property
    def sp_values(self):
        # caching not an option, leaks memory
        return self.view(np.ndarray)

    def get_values(self, fill=None):
        """ return a dense representation """
        return self.to_dense(fill=fill)

    def to_dense(self, fill=None):
        """
        Convert SparseSeries to (dense) Series
        """
        values = self.values

        # fill the nans
        if fill is None:
            fill = self.fill_value
        if not np.isnan(fill):
            values[np.isnan(values)] = fill

        return values

    def __iter__(self):
        # yield densified values one at a time (fill_value in the gaps)
        for i in range(len(self)):
            yield self._get_val_at(i)
        # BUGFIX: the original ended with ``raise StopIteration`` here; under
        # PEP 479 (Python 3.7+) that escapes the generator as RuntimeError.
        # Falling off the end of a generator already stops iteration.

    def __getitem__(self, key):
        """Integer key -> scalar; anything else -> new SparseArray slice."""
        if com.is_integer(key):
            return self._get_val_at(key)
        else:
            data_slice = self.values[key]
            return self._constructor(data_slice)

    def __getslice__(self, i, j):
        # Python 2 slicing protocol; clamps negative bounds to 0
        if i < 0:
            i = 0
        if j < 0:
            j = 0
        slobj = slice(i, j)
        return self.__getitem__(slobj)

    def _get_val_at(self, loc):
        """Return the value at dense position *loc* (supports negatives)."""
        n = len(self)
        if loc < 0:
            loc += n

        if loc >= n or loc < 0:
            raise IndexError('Out of bounds access')

        sp_loc = self.sp_index.lookup(loc)
        if sp_loc == -1:
            # position falls in a gap: the implicit fill value
            return self.fill_value
        else:
            return _index.get_value_at(self, sp_loc)

    def take(self, indices, axis=0):
        """
        Sparse-compatible version of ndarray.take

        Returns
        -------
        taken : ndarray
        """
        if axis:
            raise ValueError("axis must be 0, input was {0}".format(axis))
        indices = np.atleast_1d(np.asarray(indices, dtype=int))

        # allow -1 to indicate missing values
        n = len(self)
        if ((indices >= n) | (indices < -1)).any():
            raise IndexError('out of bounds access')

        if self.sp_index.npoints > 0:
            locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1
                             for loc in indices])
            result = self.sp_values.take(locs)
            mask = locs == -1
            if mask.any():
                try:
                    result[mask] = self.fill_value
                except ValueError:
                    # wrong dtype
                    result = result.astype('float64')
                    result[mask] = self.fill_value
        else:
            # no stored points: everything is the fill value
            result = np.empty(len(indices))
            result.fill(self.fill_value)

        return result

    def __setitem__(self, key, value):
        # if com.is_integer(key):
        #    self.values[key] = value
        # else:
        #    raise Exception("SparseArray does not support seting non-scalars via setitem")
        raise TypeError(
            "SparseArray does not support item assignment via setitem")

    def __setslice__(self, i, j, value):
        # Python 2 slice-assignment protocol; assignment is always rejected
        if i < 0:
            i = 0
        if j < 0:
            j = 0
        slobj = slice(i, j)
        # if not np.isscalar(value):
        #    raise Exception("SparseArray does not support seting non-scalars via slices")

        #x = self.values
        #x[slobj] = value
        #self.values = x
        raise TypeError(
            "SparseArray does not support item assignment via slices")

    def astype(self, dtype=None):
        """Return a float copy; only floating point dtypes are supported."""
        dtype = np.dtype(dtype)
        if dtype is not None and dtype not in (np.float_, float):
            raise TypeError('Can only support floating point data for now')
        return self.copy()

    def copy(self, deep=True):
        """
        Make a copy of the SparseSeries. Only the actual sparse values need to
        be copied
        """
        if deep:
            values = self.sp_values.copy()
        else:
            values = self.sp_values
        return SparseArray(values, sparse_index=self.sp_index,
                           dtype=self.dtype,
                           fill_value=self.fill_value)

    def count(self):
        """
        Compute sum of non-NA/null observations in SparseSeries. If the
        fill_value is not NaN, the "sparse" locations will be included in the
        observation count

        Returns
        -------
        nobs : int
        """
        sp_values = self.sp_values
        valid_spvals = np.isfinite(sp_values).sum()
        if self._null_fill_value:
            return valid_spvals
        else:
            # non-null fill value: the gaps count as observations too
            return valid_spvals + self.sp_index.ngaps

    @property
    def _null_fill_value(self):
        # True when the implicit fill value represents "missing"
        return np.isnan(self.fill_value)

    @property
    def _valid_sp_values(self):
        # stored values with NaN/inf filtered out
        sp_vals = self.sp_values
        mask = np.isfinite(sp_vals)
        return sp_vals[mask]

    def sum(self, axis=None, dtype=None, out=None):
        """
        Sum of non-NA/null values

        Returns
        -------
        sum : float
        """
        valid_vals = self._valid_sp_values
        sp_sum = valid_vals.sum()
        if self._null_fill_value:
            return sp_sum
        else:
            # gaps contribute fill_value each
            nsparse = self.sp_index.ngaps
            return sp_sum + self.fill_value * nsparse

    def cumsum(self, axis=0, dtype=None, out=None):
        """
        Cumulative sum of values. Preserves locations of NaN values

        Extra parameters are to preserve ndarray interface.

        Returns
        -------
        cumsum : Series
        """
        if com.notnull(self.fill_value):
            # non-null gaps participate in the running sum; go dense
            return self.to_dense().cumsum()
        # TODO: what if sp_values contains NaN??
        return SparseArray(self.sp_values.cumsum(),
                           sparse_index=self.sp_index,
                           fill_value=self.fill_value)

    def mean(self, axis=None, dtype=None, out=None):
        """
        Mean of non-NA/null values

        Returns
        -------
        mean : float
        """
        valid_vals = self._valid_sp_values
        sp_sum = valid_vals.sum()
        ct = len(valid_vals)

        if self._null_fill_value:
            return sp_sum / ct
        else:
            # gaps contribute fill_value each to both the sum and the count
            nsparse = self.sp_index.ngaps
            return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
def _maybe_to_dense(obj):
""" try to convert to dense """
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
    """Coerce *array* to a SparseArray: SparseSeries data is copied into a
    new SparseArray; other inputs are unwrapped to their underlying values."""
    if isinstance(array, com.ABCSparseSeries):
        array = SparseArray(
            array.values, sparse_index=array.sp_index, fill_value=array.fill_value, copy=True)
    if not isinstance(array, SparseArray):
        array = com._values_from_object(array)
    return array
def make_sparse(arr, kind='block', fill_value=nan):
    """
    Convert ndarray to sparse format

    Parameters
    ----------
    arr : ndarray
    kind : {'block', 'integer'}
    fill_value : NaN or another value

    Returns
    -------
    (sparse_values, index) : (ndarray, SparseIndex)
    """
    if hasattr(arr, 'values'):
        arr = arr.values
    else:
        if np.isscalar(arr):
            arr = [arr]
        arr = np.asarray(arr)

    length = len(arr)

    # positions that actually need storing (everything except fill_value)
    if np.isnan(fill_value):
        mask = ~np.isnan(arr)
    else:
        mask = arr != fill_value

    indices = np.arange(length, dtype=np.int32)[mask]

    if kind == 'block':
        # contiguous runs are stored as (start, length) blocks
        locs, lens = splib.get_blocks(indices)
        index = BlockIndex(length, locs, lens)
    elif kind == 'integer':
        index = IntIndex(length, indices)
    else:  # pragma: no cover
        raise ValueError('must be block or integer type')

    sparsified_values = arr[mask]
    return sparsified_values, index
# Attach the standard arithmetic/comparison dunder methods to SparseArray
# using the sparse-aware wrapper above; numexpr cannot evaluate sparse values.
ops.add_special_arithmetic_methods(SparseArray,
                                   arith_method=_arith_method,
                                   use_numexpr=False)
def _concat_compat(to_concat, axis=0):
    """
    provide concatenation of an sparse/dense array of arrays each of which is a single dtype

    Parameters
    ----------
    to_concat : array of arrays
    axis : axis to provide concatenation

    Returns
    -------
    a single array, preserving the combined dtypes
    """
    def convert_sparse(x, axis):
        # coerce to native type
        if isinstance(x, SparseArray):
            x = x.get_values()
        x = x.ravel()
        if axis > 0:
            # concatenation along a higher axis needs 2-D operands
            x = np.atleast_2d(x)
        return x

    typs = com.get_dtype_kinds(to_concat)

    # we have more than one type here, so densify and regular concat
    to_concat = [ convert_sparse(x, axis) for x in to_concat ]
    result = np.concatenate(to_concat,axis=axis)

    # only sparse/float/int inputs can be re-sparsified losslessly
    if not len(typs-set(['sparse','f','i'])):

        # we can remain sparse
        result = SparseArray(result.ravel())
    else:

        # coerce to object if needed
        result = result.astype('object')

    return result
| mit |
caidongyun/BuildingMachineLearningSystemsWithPython | ch02/stump.py | 24 | 1604 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from sklearn.datasets import load_iris

data = load_iris()
features = data.data
labels = data.target_names[data.target]

# Build a binary problem: drop setosa, then classify virginica vs. versicolor
# with a single-feature threshold (a decision stump).
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
is_virginica = (labels == 'virginica')

# Initialize to a value that is worse than any possible test
best_acc = -1.0
# Loop over all the features
for fi in range(features.shape[1]):
    # Test every possible threshold value for feature fi
    thresh = features[:, fi].copy()
    # Test them in order
    thresh.sort()
    for t in thresh:
        # Generate predictions using t as a threshold
        pred = (features[:, fi] > t)

        # Accuracy is the fraction of predictions that match reality
        acc = (pred == is_virginica).mean()

        # We test whether negating the test is a better threshold:
        acc_neg = ((~pred) == is_virginica).mean()
        if acc_neg > acc:
            acc = acc_neg
            negated = True
        else:
            negated = False

        # If this is better than previous best, then this is now the new best:
        if acc > best_acc:
            best_acc = acc
            best_fi = fi
            best_t = t
            best_is_negated = negated

# NOTE(review): best_is_negated is tracked but never reported; when True the
# winning rule is actually "feature <= t", not "feature > t".
print('Best threshold is {0} on feature {1} (index {2}), which achieves accuracy of {3:.1%}.'.format(
    best_t, data.feature_names[best_fi], best_fi, best_acc))
| mit |
tosolveit/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
# restrict to the first 150 samples
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
# 30 candidate regularization strengths, log-spaced between 1e-4 and 10**-0.5
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
# mean/std of the (default 3-fold) cross-validated score for each alpha
for alpha in alphas:
    lasso.alpha = alpha
    this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
# horizontal guide line at the best mean CV score
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/svm/tests/test_svm.py | 33 | 35916 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings, assert_raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    # Test parameters on classes that make use of libsvm.
    clf = svm.SVC(kernel='linear').fit(X, Y)
    # The toy problem (module-level X, Y) is symmetric about the origin,
    # so: one support vector per class (indices 1 and 3), opposite dual
    # coefficients, and a zero intercept.
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.support_vectors_, (X[1], X[3]))
    assert_array_equal(clf.intercept_, [0.])
    # the toy data is linearly separable, so training error must be zero
    assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_true(hasattr(clf, "coef_") == (k == 'linear'))
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
    # SVC with a precomputed kernel.
    # We test it with a toy dataset and with iris.
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # a Gram matrix of the wrong shape must be rejected at predict time
    assert_raises(ValueError, clf.predict, KT.T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_equal(pred, true_result)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])
    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)
    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])
    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # BUG FIX: the assertion below previously reused the stale `pred`
    # computed for the precomputed-kernel classifier above, so the
    # callable-kernel model was never actually evaluated. Recompute the
    # predictions for THIS classifier before asserting.
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    # NOTE(review): svm.LinearSVR(C=10.) appears twice in the tuple below;
    # presumably one entry was meant to use different parameters — confirm
    # or deduplicate.
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        # deliberately weak bar: only require better-than-trivial fit
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
    # A kernel callable that returns a malformed (wrong-shape) Gram matrix
    # must make predict() fail with ValueError.
    data = [[0.0], [1.0]]
    targets = [0.0, 0.5]
    bad_kernel = lambda x, y: np.array([[1.0]])
    reg = svm.SVR(kernel=bad_kernel)
    reg.fit(data, targets)
    assert_raises(ValueError, reg.predict, data)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [-1, -1, -1])
assert_equal(pred.dtype, np.dtype('intp'))
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(AttributeError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    # one column per class for the 3-class iris problem
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape for decision_function_shape='ovo':
    # n_classes * (n_classes - 1) / 2 = 10 pairwise columns for 5 classes
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)
    Y2 = Y[:-1] # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)
    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        # slicing the last column of a C-contiguous 2-D array yields a
        # view that is contiguous in neither layout
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)
    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
    # predicting with a different number of features than seen at fit
    # time must fail (trained on 6 features, X has 2)
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)
    # and vice versa (trained on 2 features, Xt has 6)
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError on clf.fit
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=unicode('linear'))
clf.fit(X, Y)
# Test ascii bytes (str is bytes in python2)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
else:
# Test unicode (str is unicode in python3)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
# Test ascii bytes (same as str on python2)
clf = svm.SVC(kernel=bytes('linear', 'ascii'))
clf.fit(X, Y)
# Test default behavior on both versions
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
def test_sparse_precomputed():
    """Fitting with a sparse precomputed Gram matrix must raise TypeError."""
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    # The original try/except used the obscure `assert not "reached"`
    # sentinel. assert_raises_regexp (imported at module top) checks both
    # the exception type and the message, and fails with a clear report
    # when no exception is raised at all.
    assert_raises_regexp(TypeError, "Sparse precomputed", clf.fit,
                         sparse_gram, [0, 1])
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
# Check if Upper case notation raises error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    # liblinear writes its progress report directly to file descriptor 1,
    # so the redirection must happen at the fd level (sys.stdout is not
    # enough).
    import os
    stdout = os.dup(1)  # save original stdout
    read_end, write_end = os.pipe()
    os.dup2(write_end, 1)  # replace stdout with the pipe's write end
    try:
        # actual call: just check that verbose fitting does not crash
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        # Restore stdout even if fit() raised, and close every descriptor
        # we created: the original code leaked both pipe ends and the
        # duplicated stdout fd.
        os.dup2(stdout, 1)  # restore original stdout
        os.close(stdout)
        os.close(read_end)
        os.close(write_end)
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    # A kernel callable returning a wrongly-shaped matrix (here: the raw
    # input) must be rejected with ValueError at fit time.
    broken = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, broken.fit, X, Y)
def test_timeout():
    # With max_iter=1 the optimizer cannot converge, so fitting must emit
    # a ConvergenceWarning.
    linear_kernel = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=linear_kernel, probability=True, random_state=0,
                  max_iter=1)
    assert_warns(ConvergenceWarning, clf.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
    # When fit_intercept=False, intercept_scaling is irrelevant and the
    # fitted intercept must come out exactly zero.
    model = svm.LinearSVC(fit_intercept=False).fit(X, Y)
    assert_equal(model.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
def test_ovr_decision_function():
    """OvR decision values: argmax matches prediction and grows with margin."""
    # One point from each quadrant represents one class
    X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    y_train = [0, 1, 2, 3]

    # First point is closer to the decision boundaries than the second point
    base_points = np.array([[5, 5], [10, 10]])

    # For all the quadrants (classes)
    X_test = np.vstack((
        base_points * [1, 1],    # Q1
        base_points * [-1, 1],   # Q2
        base_points * [-1, -1],  # Q3
        base_points * [1, -1]    # Q4
    ))
    y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2

    clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # Test if the prediction is the same as y
    assert_array_equal(y_pred, y_test)

    deci_val = clf.decision_function(X_test)

    # Assert that the predicted class has the maximum value
    assert_array_equal(np.argmax(deci_val, axis=1), y_pred)

    # Get decision value at test points for the predicted class
    pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))

    # Assert pred_class_deci_val > 0 here
    assert_greater(np.min(pred_class_deci_val), 0.0)

    # Test if the first point has lower decision value on every quadrant
    # compared to the second point
    assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
| bsd-3-clause |
charanpald/sandbox | sandbox/util/Evaluator.py | 1 | 12323 |
import numpy
from sandbox.util.Parameter import Parameter
#TODO: Test this file
class Evaluator(object):
    """
    A class to evaluate machine learning performance.

    All methods are static. Binary-label helpers expect labels in {-1, +1}.
    """
    def __init__(self):
        pass

    @staticmethod
    def _checkShapes(testY, predY):
        # Shared validation used by the error measures below. (Also fixes the
        # "much be" typo the original repeated in every message.)
        if testY.shape[0] != predY.shape[0]:
            raise ValueError("Labels vector must be same dimensions as predicted labels")

    @staticmethod
    def evaluateBinary1DLabels(testY, predY):
        """
        Return a vector of 6 binary-classification measures: overall error,
        (slots 1/2 reserved for sensitivity/specificity), error on the
        positive class, error on the negative class, and balanced error.
        """
        numEvaluations = 6
        evaluations = numpy.zeros(numEvaluations)
        evaluations[0] = Evaluator.binaryError(testY, predY)
        #evaluations[1] = mlpy.sens(testY, predY)
        #evaluations[2] = mlpy.spec(testY, predY)
        evaluations[3] = Evaluator.binaryErrorP(testY, predY)
        evaluations[4] = Evaluator.binaryErrorN(testY, predY)
        evaluations[5] = Evaluator.balancedError(testY, predY)
        return evaluations

    @staticmethod
    def balancedError(testY, predY):
        """Mean of the error on the positive and the negative class."""
        Evaluator._checkShapes(testY, predY)
        return 0.5*(Evaluator.binaryErrorP(testY, predY)+Evaluator.binaryErrorN(testY, predY))

    @staticmethod
    def weightedRootMeanSqError(testY, predY):
        """
        Weighted root mean square error with weights w_i = exp(alpha * y_i),
        so errors on examples with large targets count more.
        """
        Evaluator._checkShapes(testY, predY)
        alpha = 1.0
        w = numpy.exp(alpha * testY)
        return numpy.linalg.norm((testY - predY)*numpy.sqrt(w))/numpy.sqrt(testY.shape[0])

    @staticmethod
    def rootMeanSqError(testY, predY):
        """
        This is the error given by sqrt{1/y.shape sum_i (py - y)^2}
        """
        Evaluator._checkShapes(testY, predY)
        return numpy.linalg.norm(testY - predY)/numpy.sqrt(testY.size)

    @staticmethod
    def meanAbsError(testY, predY):
        """Mean absolute deviation between true and predicted values."""
        Evaluator._checkShapes(testY, predY)
        return numpy.abs(testY - predY).mean()

    @staticmethod
    def meanSqError(testY, predY):
        """
        This is the error given by the mean squared difference between examples.
        """
        Evaluator._checkShapes(testY, predY)
        return ((testY - predY)**2).mean()

    @staticmethod
    def evaluateWindowError(D, windowSize, pairIndices):
        """
        The input is a matrix D of distances between examples such that
        D_ij = d(x_i, x_j). The aim is to match each example to the corresponding
        pair based on ranking in order of their distance. An error is
        counted if the given item in the pair is not in the window.
        """
        if D.shape[0] != D.shape[1]:
            raise ValueError("D must be a square and symmetric matrix")
        numExamples = D.shape[0]
        numPairs = numExamples/2
        error = 0
        for i in pairIndices[:, 0]:
            # Indices of the windowSize nearest neighbours of example i.
            windowInds = numpy.argsort(D[i, :])[0:windowSize]
            # Counts 1 when the paired item is absent from the window.
            error = error + (windowInds != pairIndices[i, 1]).all()
        return float(error)/numPairs

    @staticmethod
    def binaryError(testY, predY):
        """
        Work out the error on a set of -1/+1 labels
        """
        Parameter.checkClass(testY, numpy.ndarray)
        Parameter.checkClass(predY, numpy.ndarray)
        Evaluator._checkShapes(testY, predY)
        error = numpy.sum(testY != predY)/float(predY.shape[0])
        return error

    @staticmethod
    def binaryBootstrapError(testY, predTestY, trainY, predTrainY, weight):
        """
        Evaluate an error in conjunction with a bootstrap method by computing
        w*testErr + (1-w)*trainErr
        """
        Parameter.checkFloat(weight, 0.0, 1.0)
        return weight*Evaluator.binaryError(testY, predTestY) + (1-weight)*Evaluator.binaryError(trainY, predTrainY)

    @staticmethod
    def binaryErrorP(testY, predY):
        """
        Error restricted to examples whose true label is +1.
        """
        Evaluator._checkShapes(testY, predY)
        posInds = (testY == 1)
        if testY[posInds].shape[0] != 0:
            # |y - py| is 2 exactly on a misclassification, hence the /2.
            error = numpy.sum(numpy.abs(testY[posInds] - predY[posInds]))/(2.0*testY[posInds].shape[0])
        else:
            error = 0.0
        return error

    @staticmethod
    def binaryErrorN(testY, predY):
        """
        Error restricted to examples whose true label is -1.
        """
        Evaluator._checkShapes(testY, predY)
        negInds = (testY == -1)
        if testY[negInds].shape[0] != 0:
            error = numpy.sum(numpy.abs(testY[negInds] - predY[negInds]))/(2.0*testY[negInds].shape[0])
        else:
            error = 0.0
        return error

    @staticmethod
    def auc2(trueY, predY):
        # Same as auc() but taking (trueY, predY) in the conventional order.
        return Evaluator.auc(predY, trueY)

    @staticmethod
    def auc(predY, trueY):
        """
        Area under the ROC curve. Can be used in conjunction with evaluateCV
        using the scores, and true labels. Note the order of parameters.
        """
        import sklearn.metrics
        Parameter.checkClass(predY, numpy.ndarray)
        Parameter.checkClass(trueY, numpy.ndarray)
        if predY.ndim != 1:
            raise ValueError("Expecting predY to be 1D")
        if trueY.ndim != 1:
            raise ValueError("Expecting trueY to be 1D")
        if numpy.unique(trueY).shape[0] > 2:
            raise ValueError("Found more than two label types in trueY")
        if numpy.unique(trueY).shape[0] == 1:
            # AUC is undefined with a single class; report chance level.
            return 0.5

        fpr, tpr, threshold = sklearn.metrics.roc_curve(trueY.ravel(), predY.ravel())
        # BUG FIX: sklearn.metrics.metrics was a deprecated alias removed from
        # scikit-learn; call the public function instead.
        return sklearn.metrics.auc(fpr, tpr)

    @staticmethod
    def roc(testY, predY):
        """Return the ROC curve (fpr, tpr), prepending the (0, 0) point if absent."""
        import sklearn.metrics
        if numpy.unique(testY).shape[0] == 1:
            # Only one class present: the ROC curve is undefined.
            fpr = numpy.array([])
            tpr = numpy.array([])
        else:
            fpr, tpr, threshold = sklearn.metrics.roc_curve(testY.ravel(), predY.ravel())
            # Insert 0,0 at the start of fpr and tpr
            if fpr[0] != 0.0 or tpr[0] != 0.0:
                fpr = numpy.insert(fpr, 0, 0)
                tpr = numpy.insert(tpr, 0, 0)
        return (fpr, tpr)

    @staticmethod
    def localAuc(testY, predY, u):
        """
        Compute the local AUC measure for a given ROC curve. The parameter u is
        the proportion of best instances to use u = P(s(X) > t).
        """
        Parameter.checkFloat(u, 0.0, 1.0)
        fpr, tpr = Evaluator.roc(testY, predY)

        minExampleIndex = int(numpy.floor((predY.shape[0]-1)*u))
        minExampleScore = numpy.flipud(numpy.sort(predY))[minExampleIndex]
        intersectInd = numpy.searchsorted(numpy.sort(numpy.unique(predY)), minExampleScore)
        intersectInd = numpy.unique(predY).shape[0] - intersectInd

        alpha = fpr[intersectInd]
        beta = tpr[intersectInd]
        # Trapezoidal area up to the cut point plus the rectangle to its right.
        localAuc = numpy.sum(0.5*numpy.diff(fpr[0:intersectInd])*(tpr[0:max(intersectInd-1, 0)] + tpr[1:intersectInd]))
        localAuc += beta*(1-alpha)
        return localAuc

    @staticmethod
    def precisionFromIndLists(testList, predList):
        """
        Measure the precision of a predicted list given the true list. The
        precision is |relevant items \cap retrieved items| / |retrieved items|.
        The items of the lists are indices.
        """
        if len(testList) == 0 or len(predList) == 0:
            return 0
        import sklearn.metrics
        n = max(numpy.max(testList), numpy.max(predList))+1
        predY = -1*numpy.ones(n)
        predY[predList] = 1
        testY = -1*numpy.ones(n)
        testY[testList] = 1
        return sklearn.metrics.precision_score(testY, predY)

    @staticmethod
    def recallFromIndLists(testList, predList):
        """
        Measure the recall of a predicted list given the true list. The recall
        is |relevant items \cap retrieved items| / |relevant items|. The items
        of the lists are indices.
        """
        import sklearn.metrics
        n = max(numpy.max(testList), numpy.max(predList))+1
        predY = -1*numpy.ones(n)
        predY[predList] = 1
        testY = -1*numpy.ones(n)
        testY[testList] = 1
        return sklearn.metrics.recall_score(testY, predY)

    @staticmethod
    def f1FromIndLists(testList, predList):
        """
        Measure the F1 score (harmonic mean of precision and recall) of a
        predicted list given the true list. The items of the lists are indices.
        """
        import sklearn.metrics
        n = max(numpy.max(testList), numpy.max(predList))+1
        predY = -1*numpy.ones(n)
        predY[predList] = 1
        testY = -1*numpy.ones(n)
        testY[testList] = 1
        return sklearn.metrics.f1_score(testY, predY)

    @staticmethod
    def averagePrecisionFromLists(testList, predList, k=100):
        """
        Computes the average precision at k. Borrowed from
        https://github.com/benhamner/Metrics.

        testList : list of elements to be predicted (order doesn't matter)
        predList : list of predicted elements (order matters)
        k : maximum number of predicted elements considered

        Returns the average precision at k over the input lists.
        NOTE(review): an empty testList yields 1.0, matching the original;
        some versions of this metric return 0.0 instead — confirm intent.
        """
        if len(predList) > k:
            predList = predList[:k]

        score = 0.0
        num_hits = 0.0
        for i, p in enumerate(predList):
            # Only the first occurrence of each correct prediction counts.
            if p in testList and p not in predList[:i]:
                num_hits += 1.0
                score += num_hits / (i+1.0)

        if not testList:
            return 1.0
        return score / min(len(testList), k)

    @staticmethod
    def meanAveragePrecisionFromLists(actual, predicted, k=10):
        """
        Computes the mean average precision at k between two lists of lists of
        items (actual: true element lists, order irrelevant; predicted:
        predicted element lists, order relevant).
        """
        # BUG FIX: removed a stray debug print of the full inputs.
        return numpy.mean([Evaluator.averagePrecisionFromLists(a, p, k) for a, p in zip(actual, predicted)])

    @staticmethod
    def ndcg(testY, predY, n):
        """
        Compute the Normalised Discounted Cumulative Gain at N. The relevance
        of the items in testY is 1 otherwise the item has relevance 0.

        :param testY: A partial list of indices
        :param predY: A list of predicted indices
        """
        # Deliberately disabled: the implementation below is unfinished.
        raise ValueError("Method not implemented completely")

        testY = testY[0:n]
        predY = predY[0:n]
        m = max(numpy.max(testY), numpy.max(predY))+1
        rel = numpy.zeros(m)
        rel[predY] = 1
        # dtype=float replaces the removed numpy.float alias.
        dcg = rel[0] + rel[1:]/numpy.log2(numpy.arange(2, m+1, dtype=float))
        dcg = dcg.sum()

        rel = numpy.zeros(m)
        rel[testY] = 1
        dcg2 = rel[0] + rel[1:]/numpy.log2(numpy.arange(2, m+1, dtype=float))
        dcg2 = dcg2.sum()

        return dcg/dcg2
yaukwankiu/twstocks | mark1.py | 1 | 9719 | # -*- coding: utf8 -*-
############################
# imports
import time
import datetime
import urllib2
import re
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
############################
# defining the parameters

# Matches the bold current-price cell on the Yahoo! Taiwan quote page.
currentPriceRegex = re.compile(r'(?<=\<td\ align\=\"center\"\ bgcolor\=\"\#FFFfff\"\ nowrap\>\<b\>)\d*\.\d*(?=\<\/b\>\<\/td\>)')
#companyNameRegex = re.compile( ur'(?<=\<TITLE\>).+(?=-公司資料-奇摩股市\<\/TITLE\>)',re.UNICODE) #doesn't work somehow
# Fallback: grab the whole <TITLE> tag; stock.__init__ slices off the markup.
companyNameRegex = re.compile( ur'\<TITLE.+TITLE\>', re.UNICODE)
stockSymbolsList = []                       # cache of discovered 4-digit symbols
outputFolder = "c:/chen chen/stocks/"       # root folder for all output
stockSymbolsFile='stockSymbols.pydump'      # pickled symbol list
pricesFolder = outputFolder+ "prices/"      # per-stock .dat price logs
stocksFolder = outputFolder +"stocks/"      # pickled stock objects
############################
#
############################
# defining the classes
class stock:
def __init__(self, symbol):
"""e.g.
https://tw.stock.yahoo.com/d/s/company_1473.html
"""
symbol= ('000'+str(symbol))[-4:]
self.symbol = symbol
self.yahooFrontPageUrl = 'https://tw.stock.yahoo.com/d/s/company_' + symbol + '.html'
self.yahooCurrentPageUrl = 'https://tw.stock.yahoo.com/q/q?s=' + symbol
# get some basic information from the front page
yahooFrontPage = urllib2.urlopen(self.yahooFrontPageUrl)
raw_text = yahooFrontPage.read()
self.name = companyNameRegex.findall(raw_text)[0]
self.name = self.name[7:-26]
self.pricesList = []
def __call__(self):
outputString = ""
#outputString += self.symbol + '\n' #unnecessary
outputString += self.name + '\n'
outputString += self.yahooCurrentPageUrl + '\n'
if self.pricesList != []:
outputString += '\n'.join([time.asctime(time.localtime((v['pingTime'])))+ ": $" + str(v['price']) for v in self.pricesList])
print outputString
def openYahooCurrentPage(self):
self.yahooCurrentPage = urllib2.urlopen(self.yahooCurrentPageUrl)
def getCurrentPrice(self, verbose=True, showResponseTime=True):
self.openYahooCurrentPage()
t0 = time.time()
raw_text = self.yahooCurrentPage.read()
t1 = time.time()
self.yahooCurrentPage.close()
currentPrice = currentPriceRegex.findall(raw_text)[0]
self.currentPricePingTime = t0
self.currentPricePingReturnTime = t1
self.currentPrice = currentPrice
if verbose:
print "Time: ", time.asctime(time.localtime(t0)),
if showResponseTime:
print "(response time: ", t1-t0, ")",
#print self.symbol, #unnecessary
print self.name, "Price:", currentPrice
self.pricesList.append({'price' : currentPrice,
'pingTime' : t0,
'responseTime' : t1-t0,
})
return currentPrice, t0, t1-t0
def getPriceList(self, throttle=1, repetitions=-999, verbose=True):
count = 0
while count!= repetitions:
count +=1
p, t0, dt = self.getCurrentPrice(verbose=verbose)
self.pricesList.append({'price' : p,
'pingTime' : t0,
'responseTime' : dt,
})
if throttle>0:
time.sleep(throttle)
def writeCurrentPrice(self, verbose=True):
P = self.pricesList[-1] # the last one
currentPrice = P['price']
t0 = P['pingTime']
dt = P['responseTime']
outputString= ''
if not os.path.exists(pricesFolder+self.name+'.dat'):
outputString = "#time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+self.name+'.dat','a').write(outputString)
if verbose:
print self.name, outputString
def loadPrices(self, pricesPath="", eraseOld=True):
if eraseOld:
self.pricesList = []
if pricesPath == "":
pricesPath = pricesFolder + self.name + ".dat"
if not os.path.exists(pricesPath):
return 0
raw_text = open(pricesPath, 'r').read()
x = raw_text.split('\n')[1:]
xx = [v.split(',') for v in x]
for u in xx:
print u
if len(u) ==2:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0] ),
'responseTime': 0
})
elif len(u) ==3:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0]) ,
'responseTime': float(u[2])
})
def load(self, *args, **kwargs):
self.loadPrices(*args, **kwargs)
def plot(self, display=True):
y = [v['price'] for v in self.pricesList]
x = [v['pingTime'] for v in self.pricesList]
plt.plot(x,y)
plt.title(self.symbol)
if display:
plt.show()
############################
# defining the functions
def getStockSymbolsList1():
for N in range(9999):
try:
s = stock(N)
stockSymbolsList.append(N)
print N, s.name, "<-------------added"
except:
print N, "doesn't exist!"
return stocksSymbolsList
def getStockSymbolsList2(url="http://sheet1688.blogspot.tw/2008/11/blog-post_18.html"):
    # Scrape 4-digit TWSE symbols from a blog page table and cache them to disk.
    raw_text = urllib2.urlopen(url).read()
    symbols = re.findall(ur'(?<=num\>)\d\d\d\d(?=\<\/td\>)', raw_text, re.UNICODE)
    symbols.sort()
    pickle.dump(symbols, open(outputFolder+stockSymbolsFile,'w'))
    # NOTE(review): this rebinds a local name, not the module-level
    # stockSymbolsList — callers should rely on the return value.
    stockSymbolsList = symbols
    return symbols
def loadStockSymbolsList(path=outputFolder+stockSymbolsFile):
    # Load the pickled symbol list written by getStockSymbolsList2().
    stockSymbolsList = pickle.load(open(path,'r'))
    return stockSymbolsList
def makeStocksList(inPath=outputFolder+stockSymbolsFile,
                   outputFolder=stocksFolder):
    # Build a stock object for every known symbol and pickle each one to
    # outputFolder.  NOTE(review): inPath is currently unused;
    # loadStockSymbolsList() falls back to its own default path.
    symbols = loadStockSymbolsList()
    for N in symbols:
        try:
            st = stock(N)
            pickle.dump(st, open(outputFolder+st.name+'.pydump','w'))
            print st.name, "-->", outputFolder+st.name+'.pydump'
        except:
            print "stock symbol", N, "not found!!!!"
def loadStocksList(inputFolder=stocksFolder):
    # Unpickle every saved stock object, sorted by the symbol portion
    # embedded near the end of each '<name>.pydump' filename.
    stocksList = []
    L = os.listdir(inputFolder)
    L.sort(key=lambda v: v[-13:-7])
    for fileName in L:
        stocksList.append(pickle.load(open(inputFolder+fileName,'r')))
    return stocksList
############################
# test run
def main0():
    # Smoke test: print each example stock, then take 5 quick price samples.
    for st in stocksList:
        st()
        st.getPriceList(repetitions=5, throttle=0.3)
def main1(throttle=0.5):
for st in stocksList:
st.load()
st()
print "=================="
while True:
time0 = time.time()
if time.time() - time0 > 600:
for st in stocksList:
st()
try:
st.writeCurrentPrice()
except:
print "writeCurrentPrice() -- error!"
time0 = time.time()
for st in stocksList:
st.getCurrentPrice()
time.sleep(throttle)
def main2():
    # Poll all known stocks during Taiwan trading hours (Mon-Fri, 09:00-13:30)
    # and append each sample directly to the per-stock .dat file.
    print "=================="
    print time.asctime(time.localtime(time.time()))
    #symbols = loadStockSymbolsList()
    while True:
        stocks = loadStocksList() #clean up every day
        # Busy-wait through weekends (tm_wday 5/6) and pre-market hours.
        while time.localtime(time.time()).tm_wday > 4: #weekends
            pass
        while time.localtime(time.time()).tm_hour<9:
            pass
        # Trading session: 09:00 up to and including 13:30.
        while (time.localtime(time.time()).tm_hour >=9 and \
               time.localtime(time.time()).tm_hour < 13) or \
              (time.localtime(time.time()).tm_hour==13 and time.localtime(time.time()).tm_min<=30):
            for st in stocks:
                try:
                    currentPrice, t0, dt = st.getCurrentPrice()
                    if not os.path.exists(pricesFolder+st.name+'.dat'):
                        # New file: write a header line first.
                        outputString = "time, price, response time\n"
                    else:
                        outputString = ""
                    outputString += str(t0) + ", " + str(currentPrice)
                    if dt>1:
                        # Only record unusually slow responses.
                        outputString += ", " + str(int(dt))
                    outputString += '\n'
                    open(pricesFolder+st.name+'.dat','a').write(outputString)
                    time.sleep(.5)
                except:
                    print "ERROR!! <------ ", st.name
            T = time.localtime()
            print time.asctime(T)
        #if T.tm_hour < 9 or T.tm_hour>=13 and T.tm_min>=30:
        #    time.sleep(86400 - (13-9)*3600 - 30*60)
        print "End of the trading session of the day!"
def main(*args, **kwargs):
    # Entry point: currently dispatches to the continuous poller main1().
    main1(*args, **kwargs)
if __name__=="__main__":
    ############################
    # constructing examples
    tainam = stock(symbol='1473')
    chenpinsen = stock(symbol=2926)
    ganung = stock(symbol=2374)
    tungyang = stock(symbol=1319)
    htc = stock(2498)
    prince = stock(2511)
    stocksList = [tainam, chenpinsen, ganung, tungyang, htc, prince]
    ##############################
    # test run
    main(60)  # poll continuously with a 60-second throttle
| cc0-1.0 |
rajat1994/scikit-learn | sklearn/utils/validation.py | 67 | 24013 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
    """A warning on implicit data conversions happening in the code"""
    pass

# Always surface data-conversion warnings so silent dtype copies are visible.
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
    """A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
    """Exception class to raise if estimator is used before fitting

    This class inherits from both ValueError and AttributeError to help with
    exception handling and backward compatibility.
    """

# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # For sparse matrices only the stored (non-zero) values need checking.
    _assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
    """Converts an array-like to an array of floats

    The new dtype will be np.float32 or np.float64, depending on the original
    type. The function can create a copy or modify the argument depending
    on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
                                    and not sp.issparse(X)):
        # Generic array-like (or np.matrix): delegate full validation and
        # conversion to check_array.
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)
    elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
        return X.copy() if copy else X
    elif X.dtype in [np.float32, np.float64]:  # is numpy array
        # Preserve memory layout (Fortran vs C order) when copying.
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
    else:
        # int32 maps to float32, everything else to float64.
        return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    # None entries are placeholders and are exempt from the check.
    lengths = [_num_samples(X) for X in arrays if X is not None]
    uniques = np.unique(lengths)
    if len(uniques) > 1:
        raise ValueError("Found arrays with inconsistent numbers of samples: "
                         "%s" % str(uniques))
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that everything
    can be indexed by converting sparse matrices to csr and converting
    non-interable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        if sp.issparse(X):
            # CSR supports efficient row indexing, which CV slicing needs.
            result.append(X.tocsr())
        elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already indexable (sequence or pandas object): pass through.
            result.append(X)
        elif X is None:
            result.append(X)
        else:
            result.append(np.array(X))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
        matrix input will raise an error. If the input is sparse but not in
        the allowed format, it will be converted to the first listed format.

    dtype : string, type or None (default=none)
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if accept_sparse in [None, False]:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    if dtype is None:
        dtype = spmatrix.dtype

    changed_format = False
    if (isinstance(accept_sparse, (list, tuple))
            and spmatrix.format not in accept_sparse):
        # create new with correct sparse
        spmatrix = spmatrix.asformat(accept_sparse[0])
        changed_format = True

    if dtype != spmatrix.dtype:
        # convert dtype
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy (the format/dtype conversions above already copied)
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # Some formats (e.g. dok) expose no flat data array to check.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2nd numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        # The conversion implied a memory copy the caller asked to know about.
        msg = ("Data with input dtype %s was converted to %s"
               % (dtype_orig, array.dtype))
        if estimator is not None:
            if not isinstance(estimator, six.string_types):
                estimator = estimator.__class__.__name__
            msg += " by %s" % estimator
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
              force_all_finite=True, ensure_2d=True, allow_nd=False,
              multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Validates X and y together: X is run through :func:`check_array` with all
    of the forwarded options, y is validated as a 1d vector (or, when
    ``multi_output=True``, as a 2d array / sparse matrix), and finally both
    are checked for consistent first-dimension length.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.
    y : nd-array, list or sparse matrix
        Labels.
    accept_sparse : string, list of string or None (default=None)
        Allowed sparse formats ('csc', 'csr', ...). None means sparse X
        raises an error; otherwise X is converted to the first listed format
        if needed.
    dtype : string, type, list of types or None (default="numeric")
        Target dtype of the result. None preserves the input dtype;
        "numeric" preserves it unless it is object (then float conversion);
        a list converts to its first element only when needed.
    order : 'F', 'C' or None (default=None)
        Requested memory layout for X.
    copy : boolean (default=False)
        Force a copy of X (conversions may copy even when False).
    force_all_finite : boolean (default=True)
        Raise on np.inf / np.nan in X.
    ensure_2d : boolean (default=True)
        Make X at least 2d.
    allow_nd : boolean (default=False)
        Allow X.ndim > 2.
    multi_output : boolean (default=False)
        Allow 2d (array or sparse) y; otherwise y is coerced to a vector.
    ensure_min_samples : int (default=1)
        Minimum number of rows required in X.
    ensure_min_features : int (default=1)
        Minimum number of columns required in a 2d X.
    y_numeric : boolean (default=False)
        Convert object-dtype y to float64 (regression targets).
    warn_on_dtype : boolean (default=False)
        Emit DataConversionWarning when X's dtype changed.
    estimator : str or estimator instance (default=None)
        Included in warning messages when given.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # X gets the full battery of array checks; options are forwarded by
    # keyword for clarity.
    X = check_array(X, accept_sparse=accept_sparse, dtype=dtype, order=order,
                    copy=copy, force_all_finite=force_all_finite,
                    ensure_2d=ensure_2d, allow_nd=allow_nd,
                    ensure_min_samples=ensure_min_samples,
                    ensure_min_features=ensure_min_features,
                    warn_on_dtype=warn_on_dtype, estimator=estimator)
    if not multi_output:
        # Standard case: y must be a finite 1d vector.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    else:
        # Multi-label / multi-output case: 2d and sparse y are allowed.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    if y_numeric and y.dtype.kind == 'O':
        y = y.astype(np.float64)
    check_consistent_length(X, y)
    return X, y
def column_or_1d(y, warn=False):
    """Ravel a 1d array or a single-column 2d array, else raise an error.

    Parameters
    ----------
    y : array-like
    warn : boolean, default False
        To control display of warnings.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    is_flat = len(shape) == 1
    is_column = len(shape) == 2 and shape[1] == 1
    if is_column and warn:
        # A (n, 1) column works but is almost always an accident upstream.
        warnings.warn("A column-vector y was passed when a 1d array was"
                      " expected. Please change the shape of y to "
                      "(n_samples, ), for example using ravel().",
                      DataConversionWarning, stacklevel=2)
    if is_flat or is_column:
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it unchanged.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # The global singleton backing the np.random module-level functions.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        # Already a generator: hand it back untouched.
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Checks whether the estimator's fit method supports the given parameter.

    Parameters
    ----------
    estimator : object
        An object exposing a ``fit`` method.
    parameter : str
        Name of the parameter to look for in the signature of ``fit``.

    Returns
    -------
    bool
        True if ``fit`` accepts a parameter with the given name.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    try:
        # inspect.getargspec was deprecated and finally removed in
        # Python 3.11, and it never saw keyword-only parameters.
        # inspect.signature handles both and is the supported API.
        from inspect import signature
    except ImportError:  # pragma: no cover - very old Pythons only
        return parameter in getargspec(estimator.fit)[0]
    return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    Non-symmetric input is replaced with its symmetrized version, i.e. the
    average of the array and its transpose; a warning and/or exception can be
    raised instead, depending on the flags.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array. If sparse, then duplicate
        entries are first summed and zeros are eliminated.
    """
    shape = array.shape
    if array.ndim != 2 or shape[0] != shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(shape))
    if sp.issparse(array):
        diff = array - array.T
        # Only csr, csc, and coo expose a `data` attribute we can inspect.
        if diff.format not in ('csr', 'csc', 'coo'):
            diff = diff.tocsr()
        is_symmetric = np.all(abs(diff.data) < tol)
    else:
        is_symmetric = np.allclose(array, array.T, atol=tol)
    if is_symmetric:
        return array
    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")
    symmetrized = 0.5 * (array + array.T)
    if sp.issparse(array):
        # Convert the result back to the input's sparse format.
        symmetrized = getattr(symmetrized, 'to' + array.format)()
    return symmetrized
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks that the estimator carries "all_or_any" of the given fitted
    attributes, raising a NotFittedError with the given message otherwise.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.
    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg. : ["coef_", "estimator_", ...], "coef_"
    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")
    if not hasattr(estimator, 'fit'):
        # Anything without a fit method is not an estimator at all.
        raise TypeError("%s is not an estimator instance." % (estimator))
    attr_list = attributes if isinstance(attributes, (list, tuple)) \
        else [attributes]
    fitted = all_or_any(hasattr(estimator, attr) for attr in attr_list)
    if not fitted:
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """Raise if the array contains any negative value.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.
    whom : string
        Who passed X to this function (used in the error message).
    """
    # For sparse input, only the stored (non-zero) values need checking.
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
emmanuelle/scikits.image | doc/examples/applications/plot_geometric.py | 2 | 3213 | """
===============================
Using geometric transformations
===============================
In this example, we will see how to use geometric transformations in the context
of image processing.
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf
# Shared subplot-spacing options used by every figure below.
margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
"""
Basics
======
Several different geometric transformation types are supported: similarity,
affine, projective and polynomial.
Geometric transformations can either be created using the explicit parameters
(e.g. scale, shear, rotation and translation) or the transformation matrix:
First we create a transformation using explicit parameters:
"""
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2,
                               translation=(0, 1))
# NOTE: the Python-2-only `print x` statements were rewritten as the
# single-argument `print(x)` form, which behaves identically on Python 2
# and is required on Python 3.
print(tform._matrix)
"""
Alternatively you can define a transformation by the transformation matrix
itself:
"""
matrix = tform._matrix.copy()
matrix[1, 2] = 2
tform2 = tf.SimilarityTransform(matrix)
"""
These transformation objects can then be used to apply forward and inverse
coordinate transformations between the source and destination coordinate
systems:
"""
coord = [1, 0]
print(tform2(coord))
print(tform2.inverse(tform(coord)))
"""
Image warping
=============
Geometric transformations can also be used to warp images:
"""
text = data.text()
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4,
                               translation=(text.shape[0] / 2, -100))
rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.axis('off')
ax2.imshow(rotated)
ax2.axis('off')
ax3.imshow(back_rotated)
ax3.axis('off')
"""
.. image:: PLOT2RST.current_figure
Parameter estimation
====================
In addition to the basic functionality mentioned above you can also estimate the
parameters of a geometric transformation using the least-squares method.
This can amongst other things be used for image registration or rectification,
where you have a set of control points or homologous/corresponding points in two
images.
Let's assume we want to recognize letters on a photograph which was not taken
from the front but at a certain angle. In the simplest case of a plane paper
surface the letters are projectively distorted. Simple matching algorithms would
not be able to match such symbols. One solution to this problem would be to warp
the image so that the distortion is removed and then apply a matching algorithm:
"""
text = data.text()
# Corresponding control points: src in the rectified frame, dst in the photo.
src = np.array((
    (0, 0),
    (0, 50),
    (300, 50),
    (300, 0)
))
dst = np.array((
    (155, 15),
    (65, 40),
    (260, 130),
    (360, 95)
))
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
    """Assert that fit().predict() and fit_predict() label X identically."""
    # Deep-copy first so both models start from the same unfitted state.
    clone = copy.deepcopy(model)
    labels_fit_then_predict = model.fit(X).predict(X)
    labels_fit_predict = clone.fit_predict(X)
    # ARI == 1.0 exactly when the labelings agree up to label permutation.
    assert adjusted_rand_score(labels_fit_then_predict,
                               labels_fit_predict) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed a ``LinAlgError`` exception,
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
anguoyang/SMQTK | python/smqtk/content_description/colordescriptor/colordescriptor.py | 1 | 42778 | import abc
import logging
import json
import math
import mimetypes
import multiprocessing
import multiprocessing.pool
import numpy
import os
import os.path as osp
import pyflann
import sklearn.cluster
import sys
import tempfile
import smqtk_config
from smqtk.content_description import ContentDescriptor
from smqtk.utils import safe_create_dir, SimpleTimer, video_utils
from smqtk.utils.string_utils import partition_string
from smqtk.utils.video_utils import get_metadata_info
# Attempt importing utilities module. If not, flag descriptor as unusable.
from . import utils
# noinspection PyAbstractClass,PyPep8Naming
class ColorDescriptor_Base (ContentDescriptor):
"""
Simple implementation of ColorDescriptor feature descriptor utility for
feature generation over images and videos.
This was started as an attempt at gaining a deeper understanding of what's
going on with this feature descriptor's use and how it applied to later use
in an indexer.
Codebook generated via kmeans given a set of input data. FLANN index model
used for quantization, buily using auto-tuning (picks the best indexing
algorithm of linear, kdtree, kmeans, or combined), and using the Chi-Squared
distance function.
"""
# colorDescriptor executable that should be on the PATH
PROC_COLORDESCRIPTOR = 'colorDescriptor'
# Distance function to use in FLANN indexing. See FLANN documentation for
# available distance function types (under the MATLAB section reference for
# valid string identifiers)
FLANN_DISTANCE_FUNCTION = 'chi_square'
# Total number of descriptors to use from input data to generate codebook
# model. Fewer than this may be used if the data set is small, but if it is
# greater, we randomly sample down to this count (occurs on a per element
# basis).
CODEBOOK_DESCRIPTOR_LIMIT = 1000000.
@classmethod
def is_usable(cls):
"""
Check whether this descriptor is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
log = logging.getLogger('.'.join([cls.__module__,
cls.__name__,
"is_usable"]))
# Check for colorDescriptor executable on the path
import subprocess
try:
# This should try to print out the CLI options return with code 1.
subprocess.call(['colorDescriptor', '-h'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
log.warn("Could not locate colorDescriptor executable. Make sure "
"that its on the PATH! See "
"smqtk/content_description/colordescriptor/INSTALL.md "
"for help.")
return False
# Checking if DescriptorIO is importable
if not utils.has_colordescriptor_module():
log.warn("Could not import DescriptorIO. Make sure that the "
"colorDescriptor package is on the PYTHONPATH! See "
"smqtk/content_description/colordescriptor/INSTALL.md "
"for help.")
return False
return True
def __init__(self, model_directory, work_directory,
kmeans_k=1024, flann_target_precision=0.95,
flann_sample_fraction=0.75,
random_seed=None, use_spatial_pyramid=False):
"""
Initialize a new ColorDescriptor interface instance.
:param model_directory: Path to the directory to store/read data model
files on the local filesystem. Relative paths are treated relative
to ``smqtk_config.DATA_DIR``.
:type model_directory: str | unicode
:param work_directory: Path to the directory in which to place
temporary/working files. Relative paths are treated relative to
``smqtk_config.WORD_DIR``.
:type work_directory: str | unicode
:param kmeans_k: Centroids to generate. Default of 1024
:type kmeans_k: int
:param flann_target_precision: Target precision percent to tune index
for. Default is 0.90 (90% accuracy). For some codebooks, if this is
too close to 1.0, the FLANN library may non-deterministically
overflow, causing an infinite loop requiring a SIGKILL to stop.
:type flann_target_precision: float
:param flann_sample_fraction: Fraction of input data to use for index
auto tuning. Default is 0.75 (75%).
:type flann_sample_fraction: float
:param random_seed: Optional value to seed components requiring random
operations.
:type random_seed: None or int
"""
# TODO: Because of the FLANN library non-deterministic overflow issue,
# an alternative must be found before this can be put into
# production. Suggest saving/using sk-learn MBKMeans class? Can
# the class be regenerated from an existing codebook?
self._model_dir = osp.join(smqtk_config.DATA_DIR, model_directory)
self._work_dir = osp.join(smqtk_config.WORK_DIR, work_directory)
self._kmeans_k = int(kmeans_k)
self._flann_target_precision = float(flann_target_precision)
self._flann_sample_fraction = float(flann_sample_fraction)
self._use_sp = use_spatial_pyramid
self._rand_seed = None if random_seed is None else int(random_seed)
if self._rand_seed is not None:
numpy.random.seed(self._rand_seed)
# Cannot pre-load FLANN stuff because odd things happen when processing/
# threading. Loading index file is fast anyway.
self._codebook = None
if self.has_model:
self._codebook = numpy.load(self.codebook_filepath)
@property
def codebook_filepath(self):
safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.codebook.npy" % (self.descriptor_type(),))
@property
def flann_index_filepath(self):
safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.flann_index.dat" % (self.descriptor_type(),))
@property
def flann_params_filepath(self):
safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.flann_params.json" % (self.descriptor_type(),))
@property
def has_model(self):
has_model = (osp.isfile(self.codebook_filepath)
and osp.isfile(self.flann_index_filepath))
# Load the codebook model if not already loaded. FLANN index will be
# loaded when needed to prevent thread/subprocess memory issues.
if self._codebook is None and has_model:
self._codebook = numpy.load(self.codebook_filepath)
return has_model
@property
def temp_dir(self):
return safe_create_dir(osp.join(self._work_dir, 'temp_files'))
    @abc.abstractmethod
    def descriptor_type(self):
        """
        Descriptor type label used on the colorDescriptor command line and in
        model/checkpoint filenames.

        :return: String descriptor type as used by colorDescriptor
        :rtype: str
        """
        return
    @abc.abstractmethod
    def _generate_descriptor_matrices(self, data_set, **kwargs):
        """
        Generate info and descriptor matrices based on ingest type.

        :param data_set: Iterable of data elements to generate combined info
            and descriptor matrices for.
        :type data_set: collections.Set[smqtk.data_rep.DataElement]

        :param limit: Limit the number of descriptor entries to this amount.
        :type limit: int

        :return: Combined info and descriptor matrices for all base images
        :rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
        """
        pass
def _get_checkpoint_dir(self, data):
"""
The directory that contains checkpoint material for a given data element
:param data: Data element
:type data: smqtk.data_rep.DataElement
:return: directory path
:rtype: str
"""
d = osp.join(self._work_dir, *partition_string(data.md5(), 8))
safe_create_dir(d)
return d
def _get_standard_info_descriptors_filepath(self, data, frame=None):
"""
Get the standard path to a data element's computed descriptor output,
which for colorDescriptor consists of two matrices: info and descriptors
:param data: Data element
:type data: smqtk.data_rep.DataElement
:param frame: frame within the data file
:type frame: int
:return: Paths to info and descriptor checkpoint numpy files
:rtype: (str, str)
"""
d = self._get_checkpoint_dir(data)
if frame is not None:
return (
osp.join(d, "%s.info.%d.npy" % (data.md5(), frame)),
osp.join(d, "%s.descriptors.%d.npy" % (data.md5(), frame))
)
else:
return (
osp.join(d, "%s.info.npy" % data.md5()),
osp.join(d, "%s.descriptors.npy" % data.md5())
)
def _get_checkpoint_feature_file(self, data):
"""
Return the standard path to a data element's computed feature checkpoint
file relative to our current working directory.
:param data: Data element
:type data: smqtk.data_rep.DataElement
:return: Standard path to where the feature checkpoint file for this
given data element.
:rtype: str
"""
if self._use_sp:
return osp.join(self._get_checkpoint_dir(data),
"%s.feature.sp.npy" % data.md5())
else:
return osp.join(self._get_checkpoint_dir(data),
"%s.feature.npy" % data.md5())
def generate_model(self, data_set, **kwargs):
"""
Generate this feature detector's data-model given a file ingest. This
saves the generated model to the currently configured data directory.
For colorDescriptor, we generate raw features over the ingest data,
compute a codebook via kmeans, and then create an index with FLANN via
the "autotune" algorithm to intelligently pick the fastest indexing
method.
:param num_elements: Number of data elements in the iterator
:type num_elements: int
:param data_set: Set of input data elements to generate the model
with.
:type data_set: collections.Set[smqtk.data_rep.DataElement]
"""
super(ColorDescriptor_Base, self).generate_model(data_set, **kwargs)
if self.has_model:
self.log.warn("ColorDescriptor model for descriptor type '%s' "
"already generated!", self.descriptor_type())
return
pyflann.set_distance_type(self.FLANN_DISTANCE_FUNCTION)
flann = pyflann.FLANN()
if not osp.isfile(self.codebook_filepath):
self.log.info("Did not find existing ColorDescriptor codebook for "
"descriptor '%s'.", self.descriptor_type())
# generate descriptors
with SimpleTimer("Generating descriptor matrices...",
self.log.info):
descriptors_checkpoint = osp.join(self._work_dir,
"model_descriptors.npy")
if osp.isfile(descriptors_checkpoint):
self.log.debug("Found existing computed descriptors work "
"file for model generation.")
descriptors = numpy.load(descriptors_checkpoint)
else:
self.log.debug("Computing model descriptors")
_, descriptors = \
self._generate_descriptor_matrices(
data_set,
limit=self.CODEBOOK_DESCRIPTOR_LIMIT
)
_, tmp = tempfile.mkstemp(dir=self._work_dir,
suffix='.npy')
self.log.debug("Saving model-gen info/descriptor matrix")
numpy.save(tmp, descriptors)
os.rename(tmp, descriptors_checkpoint)
# Compute centroids (codebook) with kmeans
with SimpleTimer("Computing sklearn.cluster.MiniBatchKMeans...",
self.log.info):
kmeans_verbose = self.log.getEffectiveLevel <= logging.DEBUG
kmeans = sklearn.cluster.MiniBatchKMeans(
n_clusters=self._kmeans_k,
init_size=self._kmeans_k*3,
random_state=self._rand_seed,
verbose=kmeans_verbose,
compute_labels=False,
)
kmeans.fit(descriptors)
codebook = kmeans.cluster_centers_
with SimpleTimer("Saving generated codebook...", self.log.debug):
numpy.save(self.codebook_filepath, codebook)
else:
self.log.info("Found existing codebook file.")
codebook = numpy.load(self.codebook_filepath)
# create FLANN index
# - autotune will force select linear search if there are < 1000 words
# in the codebook vocabulary.
if self.log.getEffectiveLevel() <= logging.DEBUG:
log_level = 'info'
else:
log_level = 'warning'
with SimpleTimer("Building FLANN index...", self.log.info):
p = {
"target_precision": self._flann_target_precision,
"sample_fraction": self._flann_sample_fraction,
"log_level": log_level,
"algorithm": "autotuned"
}
if self._rand_seed is not None:
p['random_seed'] = self._rand_seed
flann_params = flann.build_index(codebook, **p)
with SimpleTimer("Saving FLANN index to file...", self.log.debug):
# Save FLANN index data binary
flann.save_index(self.flann_index_filepath)
# Save out log of parameters
with open(self.flann_params_filepath, 'w') as ofile:
json.dump(flann_params, ofile, indent=4, sort_keys=True)
# save generation results to class for immediate feature computation use
self._codebook = codebook
    def compute_descriptor(self, data):
        """
        Given some kind of data, process and return a feature vector as a Numpy
        array.

        A per-element checkpoint file is consulted first; if present, the saved
        feature is returned without recomputation. Otherwise raw descriptors
        are generated, quantized against the codebook via the FLANN index, and
        reduced to a normalized histogram (optionally a spatial-pyramid
        concatenation of region histograms).

        :raises RuntimeError: Feature extraction failure of some kind.

        :param data: Some kind of input data for the feature descriptor. This is
            descriptor dependent.
        :type data: smqtk.data_rep.DataElement

        :return: Feature vector. This is a histogram of N bins where N is the
            number of centroids in the codebook. Bin values is percent
            composition, not absolute counts.
        :rtype: numpy.ndarray
        """
        super(ColorDescriptor_Base, self).compute_descriptor(data)

        # Short-circuit on a previously computed, checkpointed feature.
        checkpoint_filepath = self._get_checkpoint_feature_file(data)
        if osp.isfile(checkpoint_filepath):
            return numpy.load(checkpoint_filepath)

        if not self.has_model:
            raise RuntimeError("No model currently loaded! Check the existence "
                               "or, or generate, model files!\n"
                               "Codebook path: %s\n"
                               "FLANN Index path: %s"
                               % (self.codebook_filepath,
                                  self.flann_index_filepath))

        self.log.debug("Computing descriptors for data UID[%s]...", data.uuid())
        info, descriptors = self._generate_descriptor_matrices({data})

        if not self._use_sp:
            ###
            # Codebook Quantization
            #
            # - loaded the model at class initialization if we had one
            self.log.debug("Quantizing descriptors")
            pyflann.set_distance_type(self.FLANN_DISTANCE_FUNCTION)
            flann = pyflann.FLANN()
            flann.load_index(self.flann_index_filepath, self._codebook)
            try:
                # nn_index returns (nearest codebook indices, distances).
                idxs, dists = flann.nn_index(descriptors)
            except AssertionError:
                self.log.error("Codebook shape : %s", self._codebook.shape)
                self.log.error("Descriptor shape: %s", descriptors.shape)
                raise

            # Create histogram
            # - Using explicit bin slots to prevent numpy from automatically
            #   creating tightly constrained bins. This would otherwise cause
            #   histograms between two inputs to be non-comparable (unaligned
            #   bins).
            # - See numpy note about ``bins`` to understand why the +1 is
            #   necessary
            # - Learned from spatial implementation that we could feed multiple
            #   neighbors per descriptor into here, leading to a more populated
            #   histogram.
            #   - Could also possibly weight things based on dist from
            #     descriptor?
            #: :type: numpy.core.multiarray.ndarray
            h = numpy.histogram(idxs,  # indices are all integers
                                bins=numpy.arange(self._codebook.shape[0]+1))[0]
            # self.log.debug("Quantization histogram: %s", h)
            # Normalize histogram into relative frequencies
            # - Not using /= on purpose. h is originally int32 coming out of
            #   histogram. /= would keep int32 type when we want it to be
            #   transformed into a float type by the division.
            if h.sum():
                # noinspection PyAugmentAssignment
                h = h / float(h.sum())
            else:
                h = numpy.zeros(h.shape, h.dtype)
            # self.log.debug("Normalized histogram: %s", h)
        else:
            ###
            # Spatial Pyramid Quantization
            #
            self.log.debug("Quantizing descriptors using spatial pyramid")
            ##
            # Quantization factor - number of nearest codes to be saved
            q_factor = 10
            ##
            # Concatenating spatial information to descriptor vectors to format:
            #   [ x y <descriptor> ]
            # info[:, :2] are assumed to be the (x, y) keypoint coordinates
            # emitted by colorDescriptor.
            self.log.debug("Creating combined descriptor matrix")
            m = numpy.concatenate((info[:, :2],
                                   descriptors), axis=1)
            ##
            # Creating quantized vectors, consisting vector:
            #   [ x y c_1 ... c_qf dist_1 ... dist_qf ]
            # which has a total size of 2+(qf*2)
            #
            # Sangmin's code included the distances in the quantized vector, but
            # then also passed this vector into numpy's histogram function with
            # integral bins, causing the [0,1] to be heavily populated, which
            # doesn't make sense to do.
            #   idxs, dists = flann.nn_index(m[:, 2:], q_factor)
            #   q = numpy.concatenate([m[:, :2], idxs, dists], axis=1)
            self.log.debug("Computing nearest neighbors")
            pyflann.set_distance_type(self.FLANN_DISTANCE_FUNCTION)
            flann = pyflann.FLANN()
            flann.load_index(self.flann_index_filepath, self._codebook)
            # Keep only the index matrix; distances are intentionally dropped
            # (see note above).
            idxs = flann.nn_index(m[:, 2:], q_factor)[0]
            self.log.debug("Creating quantization matrix")
            q = numpy.concatenate([m[:, :2], idxs], axis=1)
            ##
            # Build spatial pyramid from quantized matrix
            self.log.debug("Building spatial pyramid histograms")
            hist_sp = self._build_sp_hist(q, self._codebook.shape[0])
            ##
            # Combine each quadrants into single vector
            # Rows used: 0 = global histogram, 5-7 = horizontal thirds
            # (the 4 quadrant rows 1-4 are computed but not used here).
            self.log.debug("Combining global+thirds into final histogram.")
            f = sys.float_info.min  # so as we don't div by 0 accidentally
            rf_norm = lambda h: h / (float(h.sum()) + f)
            # NOTE(review): hist_sp rows are 1-D here, so ``axis=1`` relies on
            # legacy NumPy behavior that tolerated out-of-bounds axes for 1-D
            # inputs — confirm on the NumPy version in use.
            h = numpy.concatenate([rf_norm(hist_sp[0]),
                                   rf_norm(hist_sp[5]),
                                   rf_norm(hist_sp[6]),
                                   rf_norm(hist_sp[7])],
                                  axis=1)
            # noinspection PyAugmentAssignment
            # NOTE(review): if every region histogram is empty, h.sum() is 0
            # and this produces NaNs — presumably unreachable for real inputs.
            h /= h.sum()

        self.log.debug("Saving checkpoint feature file")
        if not osp.isdir(osp.dirname(checkpoint_filepath)):
            safe_create_dir(osp.dirname(checkpoint_filepath))
        numpy.save(checkpoint_filepath, h)

        return h
@staticmethod
def _build_sp_hist(feas, bins):
"""
Build spatial pyramid from quantized data. We expect feature matrix
to be in the following format:
[[ x y c_1 ... c_n dist_1 ... dist_n ]
[ ... ]
... ]
NOTES:
- See encode_FLANN.py for original implementation this was adapted
from.
:param feas: Feature matrix with the above format.
:type feas: numpy.core.multiarray.ndarray
:param bins: number of bins for the spatial histograms. This should
probably be the size of the codebook used when generating quantized
descriptors.
:type bins: int
:return: Matrix of 8 rows representing the histograms for the different
spatial regions
:rtype: numpy.core.multiarray.ndarray
"""
bins = numpy.arange(0, bins+1)
cordx = feas[:, 0]
cordy = feas[:, 1]
feas = feas[:, 2:]
# hard quantization
# global histogram
#: :type: numpy.core.multiarray.ndarray
hist_sp_g = numpy.histogram(feas, bins=bins)[0]
hist_sp_g = hist_sp_g[numpy.newaxis]
# 4 quadrants
# noinspection PyTypeChecker
midx = numpy.ceil(cordx.max()/2)
# noinspection PyTypeChecker
midy = numpy.ceil(cordy.max()/2)
lx = cordx < midx
rx = cordx >= midx
uy = cordy < midy
dy = cordy >= midy
# logging.error("LXUI: %s,%s", lx.__repr__(), uy.__repr__())
# logging.error("Length LXUI: %s,%s", lx.shape, uy.shape)
# logging.error("feas dimensions: %s", feas.shape)
#: :type: numpy.core.multiarray.ndarray
hist_sp_q1 = numpy.histogram(feas[lx & uy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q2 = numpy.histogram(feas[rx & uy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q3 = numpy.histogram(feas[lx & dy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q4 = numpy.histogram(feas[rx & dy], bins=bins)[0]
hist_sp_q1 = hist_sp_q1[numpy.newaxis]
hist_sp_q2 = hist_sp_q2[numpy.newaxis]
hist_sp_q3 = hist_sp_q3[numpy.newaxis]
hist_sp_q4 = hist_sp_q4[numpy.newaxis]
# 3 layers
# noinspection PyTypeChecker
ythird = numpy.ceil(cordy.max()/3)
l1 = cordy <= ythird
l2 = (cordy > ythird) & (cordy <= 2*ythird)
l3 = cordy > 2*ythird
#: :type: numpy.core.multiarray.ndarray
hist_sp_l1 = numpy.histogram(feas[l1], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_l2 = numpy.histogram(feas[l2], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_l3 = numpy.histogram(feas[l3], bins=bins)[0]
hist_sp_l1 = hist_sp_l1[numpy.newaxis]
hist_sp_l2 = hist_sp_l2[numpy.newaxis]
hist_sp_l3 = hist_sp_l3[numpy.newaxis]
# concatenate
hist_sp = numpy.vstack((hist_sp_g, hist_sp_q1, hist_sp_q2,
hist_sp_q3, hist_sp_q4, hist_sp_l1,
hist_sp_l2, hist_sp_l3))
return hist_sp
# noinspection PyAbstractClass,PyPep8Naming
class ColorDescriptor_Image (ColorDescriptor_Base):
    """
    ColorDescriptor specialization for still-image data elements.
    """

    def valid_content_types(self):
        """
        :return: A set valid MIME type content types that this descriptor can
            handle.
        :rtype: set[str]
        """
        return {'image/bmp', 'image/jpeg', 'image/png', 'image/tiff'}

    def _generate_descriptor_matrices(self, data_set, **kwargs):
        """
        Generate info and descriptor matrices based on ingest type.

        Single-element sets are processed in-line; larger sets are fanned out
        to a process pool with per-element checkpoint files, then the results
        are assembled into two pre-allocated "super" matrices.

        :param data_set: Iterable of data elements to generate combined info
            and descriptor matrices for.
        :type data_set: collections.Set[smqtk.data_rep.DataElement]

        :param limit: Limit the number of descriptor entries to this amount.
        :type limit: int

        :return: Combined info and descriptor matrices for all base images
        :rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
        """
        if not data_set:
            raise ValueError("No data given to process.")

        inf = float('inf')
        descriptor_limit = kwargs.get('limit', inf)
        # NOTE(review): when no limit is given this is inf (a float), and it
        # is later used as a slice bound / comparison value — confirm callers
        # always rely on the comparison path in that case.
        per_item_limit = numpy.floor(float(descriptor_limit) / len(data_set))

        if len(data_set) == 1:
            # because an iterable doesn't necessarily have a next() method
            di = iter(data_set).next()

            # Check for checkpoint files
            info_fp, desc_fp = \
                self._get_standard_info_descriptors_filepath(di)

            # Save out data bytes to temporary file
            temp_img_filepath = di.write_temp(self.temp_dir)
            try:
                # Generate descriptors
                utils.generate_descriptors(
                    self.PROC_COLORDESCRIPTOR, temp_img_filepath,
                    self.descriptor_type(), info_fp, desc_fp, per_item_limit
                )
            finally:
                # clean temp file
                di.clean_temp()

            return numpy.load(info_fp), numpy.load(desc_fp)
        else:
            # compute and V-stack matrices for all given images
            pool = multiprocessing.Pool(processes=self.PARALLEL)

            # Mapping of UID to tuple containing:
            #   (info_fp, desc_fp, async processing result, tmp_clean_method)
            r_map = {}
            with SimpleTimer("Computing descriptors async...", self.log.debug):
                for di in data_set:
                    # Creating temporary image file from data bytes
                    tmp_img_fp = di.write_temp(self.temp_dir)

                    info_fp, desc_fp = \
                        self._get_standard_info_descriptors_filepath(di)
                    args = (self.PROC_COLORDESCRIPTOR, tmp_img_fp,
                            self.descriptor_type(), info_fp, desc_fp)
                    r = pool.apply_async(utils.generate_descriptors, args)
                    r_map[di.uuid()] = (info_fp, desc_fp, r, di.clean_temp)
            pool.close()

            # Pass through results from descriptor generation, aggregating
            # matrix shapes.
            # - Transforms r_map into:
            #       UID -> (info_fp, desc_fp, starting_row, SubSampleIndices)
            self.log.debug("Constructing information for super matrices...")
            s_keys = sorted(r_map.keys())
            running_height = 0  # info and desc heights congruent

            # Known constants for colorDescriptor output widths.
            i_width = 5
            d_width = 384

            for uid in s_keys:
                ifp, dfp, r, tmp_clean_method = r_map[uid]

                # descriptor generation may have failed for this ingest UID
                try:
                    i_shape, d_shape = r.get()
                except RuntimeError:
                    # NOTE(review): "%d" with a uuid value — if di.uuid()
                    # returns a string this logging call itself raises;
                    # presumably uuids are ints here — verify.
                    self.log.warning("Descriptor generation failed for "
                                     "UID[%d], skipping its inclusion in "
                                     "model.", uid)
                    r_map[uid] = None
                    continue
                finally:
                    # Done with image file, so remove from filesystem
                    tmp_clean_method()

                # Dead in practice since i_width/d_width are constants above;
                # retained as a guard if the constants are ever cleared.
                if None in (i_width, d_width):
                    i_width = i_shape[1]
                    d_width = d_shape[1]

                ssi = None
                if i_shape[0] > per_item_limit:
                    # pick random indices to subsample down to size limit
                    ssi = sorted(
                        numpy.random.permutation(i_shape[0])[:per_item_limit]
                    )

                r_map[uid] = (ifp, dfp, running_height, ssi)
                running_height += min(i_shape[0], per_item_limit)
            pool.join()

            # Asynchronously load files, inserting data into master matrices
            self.log.debug("Building super matrices...")
            master_info = numpy.zeros((running_height, i_width), dtype=float)
            master_desc = numpy.zeros((running_height, d_width), dtype=float)
            tp = multiprocessing.pool.ThreadPool(processes=self.PARALLEL)
            for uid in s_keys:
                if r_map[uid]:
                    ifp, dfp, sR, ssi = r_map[uid]
                    tp.apply_async(ColorDescriptor_Image._thread_load_matrix,
                                   args=(ifp, master_info, sR, ssi))
                    tp.apply_async(ColorDescriptor_Image._thread_load_matrix,
                                   args=(dfp, master_desc, sR, ssi))
            tp.close()
            tp.join()
            return master_info, master_desc

    @staticmethod
    def _thread_load_matrix(filepath, m, sR, subsample=None):
        """
        load a numpy matrix from ``filepath``, inserting the loaded matrix into
        ``m`` starting at the row ``sR``.

        If ``subsample`` has a value, it is a list of row indices to keep from
        the loaded matrix before insertion.
        """
        n = numpy.load(filepath)
        if subsample:
            n = n[subsample, :]
        m[sR:sR+n.shape[0], :n.shape[1]] = n
# noinspection PyAbstractClass,PyPep8Naming
class ColorDescriptor_Video (ColorDescriptor_Base):
    """
    ColorDescriptor specialization for video data elements: frames are
    extracted with ffmpeg and per-frame descriptors are aggregated.
    """

    # # Custom higher limit for video since, ya know, they have multiple frames.
    CODEBOOK_DESCRIPTOR_LIMIT = 1500000

    # Fractional offset/duration values are scaled by the video's actual
    # duration before use (see _generate_descriptor_matrices).
    FRAME_EXTRACTION_PARAMS = {
        "second_offset": 0.0,       # Start at beginning
        "second_interval": 0.5,     # Sample every 0.5 seconds
        "max_duration": 1.0,        # Cover full duration
        "output_image_ext": 'png',  # Output PNG files
        "ffmpeg_exe": "ffmpeg",
    }

    def valid_content_types(self):
        """
        :return: A set valid MIME type content types that this descriptor can
            handle.
        :rtype: set[str]
        """
        # At the moment, assuming ffmpeg can decode all video types, which it
        # probably cannot, but we'll filter this down when it becomes relevant.
        # noinspection PyUnresolvedReferences
        # TODO: GIF support?
        return set([x for x in mimetypes.types_map.values()
                    if x.startswith('video')])

    def _generate_descriptor_matrices(self, data_set, **kwargs):
        """
        Generate info and descriptor matrices based on ingest type.

        For each video we extract frames, compute per-frame descriptors in a
        process pool, then stack everything into two "super" matrices,
        subsampling per video when the combined row count exceeds the
        per-item budget.

        :param data_set: Iterable of data elements to generate combined info
            and descriptor matrices for.
        :type data_set: collections.Set[smqtk.data_rep.DataElement]

        :param limit: Limit the number of descriptor entries to this amount.
        :type limit: int

        :return: Combined info and descriptor matrices for all base images
        :rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
        """
        descriptor_limit = kwargs.get('limit', float('inf'))
        # With videos, an "item" is one video, so, collect for a while video
        # as normal, then subsample from the full video collection.
        per_item_limit = numpy.floor(float(descriptor_limit) / len(data_set))

        # If an odd number of jobs, favor descriptor extraction
        if self.PARALLEL:
            descr_parallel = int(max(1, math.ceil(self.PARALLEL/2.0)))
            extract_parallel = int(max(1, math.floor(self.PARALLEL/2.0)))
        else:
            cpuc = multiprocessing.cpu_count()
            descr_parallel = int(max(1, math.ceil(cpuc/2.0)))
            extract_parallel = int(max(1, math.floor(cpuc/2.0)))

        # For each video, extract frames and submit colorDescriptor processing
        # jobs for each frame, combining all results into a single matrix for
        # return.
        pool = multiprocessing.Pool(processes=descr_parallel)

        # Mapping of [UID] to [frame] to tuple containing:
        #   (info_fp, desc_fp, async processing result)
        r_map = {}
        with SimpleTimer("Extracting frames and submitting descriptor jobs...",
                         self.log.debug):
            for di in data_set:
                r_map[di.uuid()] = {}
                tmp_vid_fp = di.write_temp(self.temp_dir)
                p = dict(self.FRAME_EXTRACTION_PARAMS)
                vmd = get_metadata_info(tmp_vid_fp)
                # Scale fractional offset/duration by the real video duration.
                p['second_offset'] = vmd.duration * p['second_offset']
                p['max_duration'] = vmd.duration * p['max_duration']
                fm = video_utils.ffmpeg_extract_frame_map(
                    tmp_vid_fp,
                    parallel=extract_parallel,
                    **p
                )

                # Compute descriptors for extracted frames.
                for frame, imgPath in fm.iteritems():
                    info_fp, desc_fp = \
                        self._get_standard_info_descriptors_filepath(di, frame)
                    r = pool.apply_async(
                        utils.generate_descriptors,
                        args=(self.PROC_COLORDESCRIPTOR, imgPath,
                              self.descriptor_type(), info_fp, desc_fp)
                    )
                    r_map[di.uuid()][frame] = (info_fp, desc_fp, r)

                # Clean temporary file while computing descriptors
                di.clean_temp()
        pool.close()

        # Each result is a tuple of two ndarrays: info and descriptor matrices
        with SimpleTimer("Collecting shape information for super matrices...",
                         self.log.debug):
            running_height = 0
            # Known constants for colorDescriptor output widths.
            i_width = 5
            d_width = 384

            # Transform r_map[uid] into:
            #   (info_mat_files, desc_mat_files, sR, ssi_list)
            #   -> files in frame order
            uids = sorted(r_map)
            for uid in uids:
                video_num_desc = 0
                video_info_mat_fps = []  # ordered list of frame info mat files
                video_desc_mat_fps = []  # ordered list of frame desc mat files
                for frame in sorted(r_map[uid]):
                    ifp, dfp, r = r_map[uid][frame]
                    i_shape, d_shape = r.get()
                    # Dead in practice since i_width/d_width are constants
                    # above; retained as a guard.
                    if None in (i_width, d_width):
                        i_width = i_shape[1]
                        d_width = d_shape[1]

                    video_info_mat_fps.append(ifp)
                    video_desc_mat_fps.append(dfp)
                    video_num_desc += i_shape[0]

                # If combined descriptor height exceeds the per-item limit,
                # generate a random subsample index list
                ssi = None
                if video_num_desc > per_item_limit:
                    ssi = sorted(
                        numpy.random.permutation(video_num_desc)[:per_item_limit]
                    )
                    video_num_desc = len(ssi)

                r_map[uid] = (video_info_mat_fps, video_desc_mat_fps,
                              running_height, ssi)
                running_height += video_num_desc
        pool.join()
        del pool

        with SimpleTimer("Building master descriptor matrices...",
                         self.log.debug):
            master_info = numpy.zeros((running_height, i_width), dtype=float)
            master_desc = numpy.zeros((running_height, d_width), dtype=float)
            tp = multiprocessing.pool.ThreadPool(processes=self.PARALLEL)
            for uid in uids:
                info_fp_list, desc_fp_list, sR, ssi = r_map[uid]
                tp.apply_async(ColorDescriptor_Video._thread_load_matrices,
                               args=(master_info, info_fp_list, sR, ssi))
                tp.apply_async(ColorDescriptor_Video._thread_load_matrices,
                               args=(master_desc, desc_fp_list, sR, ssi))
            tp.close()
            tp.join()

        return master_info, master_desc

    @staticmethod
    def _thread_load_matrices(m, file_list, sR, subsample=None):
        """
        load numpy matrices from files in ``file_list``, concatenating them
        vertically. If a list of row indices is provided in ``subsample`` we
        subsample those rows out of the concatenated matrix. This matrix is then
        inserted into ``m`` starting at row ``sR``.
        """
        c = numpy.load(file_list[0])
        for i in range(1, len(file_list)):
            c = numpy.vstack((c, numpy.load(file_list[i])))
        if subsample:
            c = c[subsample, :]
        m[sR:sR+c.shape[0], :c.shape[1]] = c
# Begin automatic class type creation
# Canonical list of descriptor-type strings accepted by the colorDescriptor
# executable; the factory functions below validate against this list.
valid_descriptor_types = [
    'rgbhistogram',
    'opponenthistogram',
    'huehistogram',
    'nrghistogram',
    'transformedcolorhistogram',
    'colormoments',
    'colormomentinvariants',
    'sift',
    'huesift',
    'hsvsift',
    'opponentsift',
    'rgsift',
    'csift',
    'rgbsift',
]
def _create_image_descriptor_class(descriptor_type_str):
    """
    Create and return a ColorDescriptor class that operates over Image files
    using the given descriptor type.

    The returned subclass only overrides ``descriptor_type`` to report the
    requested type string.
    """
    assert descriptor_type_str in valid_descriptor_types, \
        "Given ColorDescriptor type was not valid! Given: %s. Expected one " \
        "of: %s" % (descriptor_type_str, valid_descriptor_types)

    # noinspection PyPep8Naming
    class _cd_image_impl (ColorDescriptor_Image):
        def descriptor_type(self):
            """
            :rtype: str
            """
            return descriptor_type_str

    # Give the generated class a readable, type-specific name.
    generated_name = "ColorDescriptor_Image_%s" % descriptor_type_str
    _cd_image_impl.__name__ = generated_name
    return _cd_image_impl
def _create_video_descriptor_class(descriptor_type_str):
    """
    Create and return a ColorDescriptor class that operates over Video files
    using the given descriptor type.

    The returned subclass only overrides ``descriptor_type`` to report the
    requested type string.
    """
    assert descriptor_type_str in valid_descriptor_types, \
        "Given ColorDescriptor type was not valid! Given: %s. Expected one " \
        "of: %s" % (descriptor_type_str, valid_descriptor_types)

    # noinspection PyPep8Naming
    class _cd_video_impl (ColorDescriptor_Video):
        def descriptor_type(self):
            """
            :rtype: str
            """
            return descriptor_type_str

    # Give the generated class a readable, type-specific name.
    generated_name = "ColorDescriptor_Video_%s" % descriptor_type_str
    _cd_video_impl.__name__ = generated_name
    return _cd_video_impl
# In order to allow multiprocessing, class types must be concretely assigned to
# variables in the module. Dynamic generation causes issues with pickling (the
# default data transmission protocol).
# One module-level class per (media kind, descriptor type) pair.
ColorDescriptor_Image_rgbhistogram = _create_image_descriptor_class('rgbhistogram')
ColorDescriptor_Image_opponenthistogram = _create_image_descriptor_class('opponenthistogram')
ColorDescriptor_Image_huehistogram = _create_image_descriptor_class('huehistogram')
ColorDescriptor_Image_nrghistogram = _create_image_descriptor_class('nrghistogram')
ColorDescriptor_Image_transformedcolorhistogram = _create_image_descriptor_class('transformedcolorhistogram')
ColorDescriptor_Image_colormoments = _create_image_descriptor_class('colormoments')
ColorDescriptor_Image_colormomentinvariants = _create_image_descriptor_class('colormomentinvariants')
ColorDescriptor_Image_sift = _create_image_descriptor_class('sift')
ColorDescriptor_Image_huesift = _create_image_descriptor_class('huesift')
ColorDescriptor_Image_hsvsift = _create_image_descriptor_class('hsvsift')
ColorDescriptor_Image_opponentsift = _create_image_descriptor_class('opponentsift')
ColorDescriptor_Image_rgsift = _create_image_descriptor_class('rgsift')
ColorDescriptor_Image_csift = _create_image_descriptor_class('csift')
ColorDescriptor_Image_rgbsift = _create_image_descriptor_class('rgbsift')

ColorDescriptor_Video_rgbhistogram = _create_video_descriptor_class('rgbhistogram')
ColorDescriptor_Video_opponenthistogram = _create_video_descriptor_class('opponenthistogram')
ColorDescriptor_Video_huehistogram = _create_video_descriptor_class('huehistogram')
ColorDescriptor_Video_nrghistogram = _create_video_descriptor_class('nrghistogram')
ColorDescriptor_Video_transformedcolorhistogram = _create_video_descriptor_class('transformedcolorhistogram')
ColorDescriptor_Video_colormoments = _create_video_descriptor_class('colormoments')
ColorDescriptor_Video_colormomentinvariants = _create_video_descriptor_class('colormomentinvariants')
ColorDescriptor_Video_sift = _create_video_descriptor_class('sift')
ColorDescriptor_Video_huesift = _create_video_descriptor_class('huesift')
ColorDescriptor_Video_hsvsift = _create_video_descriptor_class('hsvsift')
ColorDescriptor_Video_opponentsift = _create_video_descriptor_class('opponentsift')
ColorDescriptor_Video_rgsift = _create_video_descriptor_class('rgsift')
ColorDescriptor_Video_csift = _create_video_descriptor_class('csift')
ColorDescriptor_Video_rgbsift = _create_video_descriptor_class('rgbsift')

# Aggregate list of all generated concrete descriptor classes.
cd_type_list = [
    ColorDescriptor_Image_rgbhistogram,
    ColorDescriptor_Video_rgbhistogram,
    ColorDescriptor_Image_opponenthistogram,
    ColorDescriptor_Video_opponenthistogram,
    ColorDescriptor_Image_huehistogram,
    ColorDescriptor_Video_huehistogram,
    ColorDescriptor_Image_nrghistogram,
    ColorDescriptor_Video_nrghistogram,
    ColorDescriptor_Image_transformedcolorhistogram,
    ColorDescriptor_Video_transformedcolorhistogram,
    ColorDescriptor_Image_colormoments,
    ColorDescriptor_Video_colormoments,
    ColorDescriptor_Image_colormomentinvariants,
    ColorDescriptor_Video_colormomentinvariants,
    ColorDescriptor_Image_sift,
    ColorDescriptor_Video_sift,
    ColorDescriptor_Image_huesift,
    ColorDescriptor_Video_huesift,
    ColorDescriptor_Image_hsvsift,
    ColorDescriptor_Video_hsvsift,
    ColorDescriptor_Image_opponentsift,
    ColorDescriptor_Video_opponentsift,
    ColorDescriptor_Image_rgsift,
    ColorDescriptor_Video_rgsift,
    ColorDescriptor_Image_csift,
    ColorDescriptor_Video_csift,
    ColorDescriptor_Image_rgbsift,
    ColorDescriptor_Video_rgbsift,
]
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 1.2s/hmm_crossvalidation_force_motion_20_states_scaled_wrt_all_data.py | 1 | 41652 | # Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv3 import Fmat_original_hshv
from data_variable_hslv3 import Fmat_original_hslv
from data_variable_lshv3 import Fmat_original_lshv
from data_variable_lslv3 import Fmat_original_lslv
# Scaling function
def scaling(mat):
    """
    Standardize the three fixed 121-row bands of a (363, k) feature matrix.

    Bands a (rows 0:121) and c (rows 242:363) are z-scored with their own
    mean/std; band b (rows 121:242) is passed through UNCHANGED — its
    standardization line is commented out below, apparently a deliberate
    experiment (NOTE(review): confirm this asymmetry is intended).

    :param mat: Input feature matrix with at least 363 rows.
    :return: Re-stacked matrix of the same shape with bands a and c scaled.
    """
    Fvec_a = mat[0:121, 0:]
    Fvec_b = mat[121:242, 0:]
    Fvec_c = mat[242:363, 0:]

    # Band a: z-score with its own statistics.
    # (Alternative scalings tried during experimentation:)
    #Fvec_a = (Fvec_a)/np.max(abs(Fvec_a))
    #Fvec_a = (Fvec_a-np.mean(Fvec_a))
    #Fvec_a = (Fvec_a-np.mean(Fvec_a))/np.max(abs(Fvec_a))
    Fvec_a = (Fvec_a - np.mean(Fvec_a)) / np.std(Fvec_a)

    # Band b: intentionally left unscaled (standardization commented out).
    #Fvec_b = (Fvec_b-np.mean(Fvec_b))/np.std(Fvec_b)

    # Band c: z-score with its own statistics.
    Fvec_c = (Fvec_c - np.mean(Fvec_c)) / np.std(Fvec_c)
    #Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c

    # BUG FIX: np.row_stack is a deprecated alias removed in NumPy 2.0;
    # np.vstack is the identical, supported spelling.
    Fvec = np.vstack([Fvec_a, Fvec_b, Fvec_c])
    return Fvec
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
    # Split each feature matrix into 20 equal row-chunks (one per hidden
    # state); for each chunk compute the mean of each feature stream and a
    # 2x2 covariance between the two streams.
    index = 0
    m,n = np.shape(fvec1)
    #print m,n
    mu_1 = np.zeros((20,1))
    mu_2 = np.zeros((20,1))
    cov = np.zeros((20,2,2))
    DIVS = m/20
    while (index < 20):
        m_init = index*DIVS
        temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
        temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
        temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
        temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
        mu_1[index] = np.mean(temp_fvec1)
        mu_2[index] = np.mean(temp_fvec2)
        # NOTE(review): np.cov of a single concatenated 1-D vector yields a
        # scalar variance that is broadcast into the whole 2x2 slot — a true
        # cross-covariance would be np.cov(temp_fvec1, temp_fvec2). Confirm
        # which behavior the downstream HMM expects before changing.
        cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
        if index == 0:
            # Python-2 style debug prints comparing stats of the first chunk.
            print 'mean = ', mu_2[index]
            print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
            print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
            print cov[index,:,:]
            print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
            print scp.std(temp_fvec2)
        index = index+1
    return mu_1,mu_2,cov
if __name__ == '__main__':

    # Object-class recognition experiment: train one 20-state HMM per object
    # class (RF = rigid-fixed, RM = rigid-movable, SF = soft-fixed,
    # SM = soft-movable) on force+motion features, hold one surface condition
    # out in turn, and accumulate a 4x4 confusion matrix across the folds.
    # Conditions: hshv/hslv/lshv/lslv = {high,low} stiffness x {high,low} velocity.
    # Per-condition trial-column layout (note HSHV has no RM trials):
    #   HSHV: RF 0:15, RM 15:15 (empty), SF 15:26, SM 26:27 -> 27 columns
    #   HSLV: RF 0:15, RM 15:30,         SF 30:45, SM 45:52 -> 52 columns
    #   LSHV: RF 0:15, RM 15:16,         SF 16:22, SM 22:28 -> 28 columns
    #   LSLV: RF 0:15, RM 15:28,         SF 28:36, SM 36:42 -> 42 columns
    # Scaling wrt all data
    Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
    # NOTE(review): 15:15 is an empty slice (HSHV has no rigid-movable trials)
    # and, unlike every other category, it is not passed through scaling() --
    # presumably harmless on an empty matrix, but confirm this is intentional.
    Fmat_rm_hshv = Fmat_original_hshv[:,15:15]
    Fmat_sf_hshv = scaling(Fmat_original_hshv[:,15:26])
    Fmat_sm_hshv = scaling(Fmat_original_hshv[:,26:27])
    Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
    Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
    Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
    Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
    Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:52])
    Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
    Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
    Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:16])
    Fmat_sf_lshv = scaling(Fmat_original_lshv[:,16:22])
    Fmat_sm_lshv = scaling(Fmat_original_lshv[:,22:28])
    Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
    Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
    Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:28])
    Fmat_sf_lslv = scaling(Fmat_original_lslv[:,28:36])
    Fmat_sm_lslv = scaling(Fmat_original_lslv[:,36:42])
    Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
    # All four conditions concatenated column-wise (not used by the folds
    # below, which index the per-condition matrices directly).
    Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
    # HMM - Implementation:
    F = ghmm.Float() # emission domain of this model
    # A - Transition Matrix
    # 20-state left-to-right (upper-triangular) topology: a state can only
    # stay or advance to a later state; the final state is absorbing.
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    # pi - initial probabilities per state (uniform: 20 * 0.05 = 1.0)
    pi = [0.05] * 20
    # Confusion Matrix
    # cmat[prediction][target], accumulated over all four held-out folds.
    cmat = np.zeros((4,4))
    #############################################################################################################################################
    # HSHV as testing set and Rest as training set
    # Checking the Data-Matrix
    # Per class, estimate (mu_force, mu_motion, 2x2 cov) per HMM state from
    # the three training conditions.  Rows 0:121 of each Fmat_* are the force
    # channel and rows 242:363 the motion channel of the same trials.
    # NOTE(review): rows 121:242 are never used anywhere in this script --
    # confirm that skipping that row band is intentional.
    mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
    mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))))
    mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))))
    mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))))
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf_hshv = [0.0]*20
    B_rm_hshv = [0.0]*20
    B_sf_hshv = [0.0]*20
    B_sm_hshv = [0.0]*20
    # ghmm's multivariate Gaussian emissions take [mu_force, mu_motion] and
    # the 2x2 covariance flattened row-major, per state.
    for num_states in range(20):
        B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
        B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
        B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
        B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
        # debug: per-state SM covariance entries
        print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
        print "----"
    #print B_sm_hshv
    #print mu_sm_motion_hshv
    # generate RF, RM, SF, SM models from parameters
    model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
    model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
    model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
    model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
    # For Training
    total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))
    total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))
    total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))
    total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))
    total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))
    total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))
    total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))
    total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))
    # Interleave force (even rows) and motion (odd rows) so each training
    # column becomes a 2-D observation sequence of length 121.
    # Column counts: RF 15+15+15=45, RM 15+1+13=29, SF 15+6+8=29, SM 7+6+6=19.
    total_seq_rf_hshv = np.zeros((242,45))
    total_seq_rm_hshv = np.zeros((242,29))
    total_seq_sf_hshv = np.zeros((242,29))
    total_seq_sm_hshv = np.zeros((242,19))
    i = 0
    j = 0
    while i < 242:
        total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
        total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
        total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
        total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
        total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
        total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
        total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
        total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
        j=j+1
        i=i+2
    # Transpose so each list element is one trial's interleaved sequence.
    train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
    train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
    train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
    train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
    #print train_seq_rf_hshv
    final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
    final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
    final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
    final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
    # Refine each class model with Baum-Welch on its training sequences.
    model_rf_hshv.baumWelch(final_ts_rf_hshv)
    model_rm_hshv.baumWelch(final_ts_rm_hshv)
    model_sf_hshv.baumWelch(final_ts_sf_hshv)
    model_sm_hshv.baumWelch(final_ts_sm_hshv)
    # For Testing
    # Held-out HSHV trials (27 columns), force/motion interleaved as above.
    total_seq_obj_hshv = np.zeros((242,27))
    total_seq_obj_force_hshv = Fmat_hshv[0:121,:]
    total_seq_obj_motion_hshv = Fmat_hshv[242:363,:]
    i = 0
    j = 0
    while i < 242:
        total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
        total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
        j=j+1
        i=i+2
    # One-hot prediction indicators, one row vector per class.
    rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
    rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
    sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
    sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
    k = 0
    while (k < np.size(total_seq_obj_hshv,1)):
        test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
        new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
        #print new_test_seq_obj_hshv
        ts_obj_hshv = new_test_seq_obj_hshv
        #print np.shape(ts_obj_hshv)
        final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
        # Find Viterbi Path
        path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
        path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
        path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
        path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
        # Predict the class whose model gives the highest Viterbi
        # log-likelihood (viterbi returns (path, log-likelihood)).
        obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
        if obj_hshv == path_rf_obj_hshv[1]:
            rf_hshv[0,k] = 1
        elif obj_hshv == path_rm_obj_hshv[1]:
            rm_hshv[0,k] = 1
        elif obj_hshv == path_sf_obj_hshv[1]:
            sf_hshv[0,k] = 1
        else:
            sm_hshv[0,k] = 1
        k = k+1
    #print rf_hshv.T
    # Accumulate into cmat; the HSHV column blocks give the true class:
    # RF 0:15, RM 15:15 (empty), SF 15:26, SM 26:27.
    cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
    cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
    cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
    cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:27])
    cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
    cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
    cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
    cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:27])
    cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
    cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
    cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
    cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:27])
    cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
    cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
    cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
    cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:27])
    #print cmat
    #############################################################################################################################################
    # HSLV as testing set and Rest as training set
    # Same pipeline as the HSHV fold: estimate per-state Gaussian parameters
    # from the other three conditions, train RF/RM/SF/SM HMMs, classify each
    # held-out HSLV trial by best Viterbi log-likelihood, accumulate cmat.
    mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
    mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))))
    mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))))
    mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))))
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf_hslv = [0.0]*20
    B_rm_hslv = [0.0]*20
    B_sf_hslv = [0.0]*20
    B_sm_hslv = [0.0]*20
    for num_states in range(20):
        B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
        B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
        B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
        B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
        # debug: per-state SM covariance entries
        print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
        print "----"
    #print B_sm_hslv
    #print mu_sm_motion_hslv
    # generate RF, RM, SF, SM models from parameters
    model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
    model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
    model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
    model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
    # For Training
    total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_lshv[0:121,0:15], Fmat_lslv[0:121,0:15])))
    total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_lshv[0:121,15:16], Fmat_lslv[0:121,15:28])))
    total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_lshv[0:121,16:22], Fmat_lslv[0:121,28:36])))
    total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_lshv[0:121,22:28], Fmat_lslv[0:121,36:42])))
    total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_lshv[242:363,0:15], Fmat_lslv[242:363,0:15])))
    total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_lshv[242:363,15:16], Fmat_lslv[242:363,15:28])))
    total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_lshv[242:363,16:22], Fmat_lslv[242:363,28:36])))
    total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_lshv[242:363,22:28], Fmat_lslv[242:363,36:42])))
    # Column counts: RF 15+15+15=45, RM 0+1+13=14, SF 11+6+8=25, SM 1+6+6=13.
    total_seq_rf_hslv = np.zeros((242,45))
    total_seq_rm_hslv = np.zeros((242,14))
    total_seq_sf_hslv = np.zeros((242,25))
    total_seq_sm_hslv = np.zeros((242,13))
    i = 0
    j = 0
    while i < 242:
        total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
        total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
        total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
        total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
        total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
        total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
        total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
        total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
        j=j+1
        i=i+2
    train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
    train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
    train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
    train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
    #print train_seq_rf_hslv
    final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
    final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
    final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
    final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
    model_rf_hslv.baumWelch(final_ts_rf_hslv)
    model_rm_hslv.baumWelch(final_ts_rm_hslv)
    model_sf_hslv.baumWelch(final_ts_sf_hslv)
    model_sm_hslv.baumWelch(final_ts_sm_hslv)
    # For Testing
    # Held-out HSLV trials (52 columns), force/motion interleaved.
    total_seq_obj_hslv = np.zeros((242,52))
    total_seq_obj_force_hslv = Fmat_hslv[0:121,:]
    total_seq_obj_motion_hslv = Fmat_hslv[242:363,:]
    i = 0
    j = 0
    while i < 242:
        total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
        total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
        j=j+1
        i=i+2
    rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
    rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
    sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
    sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
    k = 0
    while (k < np.size(total_seq_obj_hslv,1)):
        test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
        new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
        #print new_test_seq_obj_hslv
        ts_obj_hslv = new_test_seq_obj_hslv
        #print np.shape(ts_obj_hslv)
        final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
        # Find Viterbi Path
        path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
        path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
        path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
        path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
        # Highest Viterbi log-likelihood wins.
        obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
        if obj_hslv == path_rf_obj_hslv[1]:
            rf_hslv[0,k] = 1
        elif obj_hslv == path_rm_obj_hslv[1]:
            rm_hslv[0,k] = 1
        elif obj_hslv == path_sf_obj_hslv[1]:
            sf_hslv[0,k] = 1
        else:
            sm_hslv[0,k] = 1
        k = k+1
    #print rf_hshv.T
    # True-class column blocks for HSLV: RF 0:15, RM 15:30, SF 30:45, SM 45:52.
    cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
    cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
    cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
    cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:52])
    cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
    cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
    cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
    cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:52])
    cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
    cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
    cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
    cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:52])
    cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
    cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
    cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
    cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:52])
    #print cmat
    ############################################################################################################################################
    # LSHV as testing set and Rest as training set
    # Same pipeline as the HSHV fold, with LSHV held out.
    mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lslv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lslv[242:363,0:15])))))
    mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lslv[0:121,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lslv[242:363,15:28])))))
    mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lslv[0:121,28:36])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lslv[242:363,28:36])))))
    mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lslv[0:121,36:42])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lslv[242:363,36:42])))))
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf_lshv = [0.0]*20
    B_rm_lshv = [0.0]*20
    B_sf_lshv = [0.0]*20
    B_sm_lshv = [0.0]*20
    for num_states in range(20):
        B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
        B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
        B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
        B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
        # debug: per-state SM covariance entries
        print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
        print "----"
    #print B_sm_lshv
    #print mu_sm_motion_lshv
    # generate RF, RM, SF, SM models from parameters
    model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
    model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
    model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
    model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
    # For Training
    total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lslv[0:121,0:15])))
    total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lslv[0:121,15:28])))
    total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lslv[0:121,28:36])))
    total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lslv[0:121,36:42])))
    total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lslv[242:363,0:15])))
    total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lslv[242:363,15:28])))
    total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lslv[242:363,28:36])))
    total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lslv[242:363,36:42])))
    # Column counts: RF 15+15+15=45, RM 0+15+13=28, SF 11+15+8=34, SM 1+7+6=14.
    total_seq_rf_lshv = np.zeros((242,45))
    total_seq_rm_lshv = np.zeros((242,28))
    total_seq_sf_lshv = np.zeros((242,34))
    total_seq_sm_lshv = np.zeros((242,14))
    i = 0
    j = 0
    while i < 242:
        total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
        total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
        total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
        total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
        total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
        total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
        total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
        total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
        j=j+1
        i=i+2
    train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
    train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
    train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
    train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
    #print train_seq_rf_lshv
    final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
    final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
    final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
    final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
    model_rf_lshv.baumWelch(final_ts_rf_lshv)
    model_rm_lshv.baumWelch(final_ts_rm_lshv)
    model_sf_lshv.baumWelch(final_ts_sf_lshv)
    model_sm_lshv.baumWelch(final_ts_sm_lshv)
    # For Testing
    # Held-out LSHV trials (28 columns), force/motion interleaved.
    total_seq_obj_lshv = np.zeros((242,28))
    total_seq_obj_force_lshv = Fmat_lshv[0:121,:]
    total_seq_obj_motion_lshv = Fmat_lshv[242:363,:]
    i = 0
    j = 0
    while i < 242:
        total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
        total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
        j=j+1
        i=i+2
    rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
    rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
    sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
    sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
    k = 0
    while (k < np.size(total_seq_obj_lshv,1)):
        test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
        new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
        #print new_test_seq_obj_lshv
        ts_obj_lshv = new_test_seq_obj_lshv
        #print np.shape(ts_obj_lshv)
        final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
        # Find Viterbi Path
        path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
        path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
        path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
        path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
        # Highest Viterbi log-likelihood wins.
        obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
        if obj_lshv == path_rf_obj_lshv[1]:
            rf_lshv[0,k] = 1
        elif obj_lshv == path_rm_obj_lshv[1]:
            rm_lshv[0,k] = 1
        elif obj_lshv == path_sf_obj_lshv[1]:
            sf_lshv[0,k] = 1
        else:
            sm_lshv[0,k] = 1
        k = k+1
    #print rf_lshv.T
    # True-class column blocks for LSHV: RF 0:15, RM 15:16, SF 16:22, SM 22:28.
    cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
    cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
    cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:22])
    cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,22:28])
    cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
    cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
    cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:22])
    cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,22:28])
    cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
    cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
    cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:22])
    cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,22:28])
    cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
    cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
    cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:22])
    cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,22:28])
    #print cmat
    #############################################################################################################################################
    # LSLV as testing set and Rest as training set
    # Same pipeline as the HSHV fold, with LSLV held out.
    mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15])))))
    mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16])))))
    mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22])))))
    mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28])))), (np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28])))))
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf_lslv = [0.0]*20
    B_rm_lslv = [0.0]*20
    B_sf_lslv = [0.0]*20
    B_sm_lslv = [0.0]*20
    for num_states in range(20):
        B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
        B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
        B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
        B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
        # debug: per-state SM covariance entries
        print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
        print "----"
    #print B_sm_lslv
    #print mu_sm_motion_lslv
    # generate RF, RM, SF, SM models from parameters
    model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
    model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
    model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
    model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
    # For Training
    total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,0:15], Fmat_hslv[0:121,0:15], Fmat_lshv[0:121,0:15])))
    total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:15], Fmat_hslv[0:121,15:30], Fmat_lshv[0:121,15:16])))
    total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,15:26], Fmat_hslv[0:121,30:45], Fmat_lshv[0:121,16:22])))
    total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:121,26:27], Fmat_hslv[0:121,45:52], Fmat_lshv[0:121,22:28])))
    total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,0:15], Fmat_hslv[242:363,0:15], Fmat_lshv[242:363,0:15])))
    total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:15], Fmat_hslv[242:363,15:30], Fmat_lshv[242:363,15:16])))
    total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,15:26], Fmat_hslv[242:363,30:45], Fmat_lshv[242:363,16:22])))
    total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[242:363,26:27], Fmat_hslv[242:363,45:52], Fmat_lshv[242:363,22:28])))
    # Column counts: RF 15+15+15=45, RM 0+15+1=16, SF 11+15+6=32, SM 1+7+6=14.
    total_seq_rf_lslv = np.zeros((242,45))
    total_seq_rm_lslv = np.zeros((242,16))
    total_seq_sf_lslv = np.zeros((242,32))
    total_seq_sm_lslv = np.zeros((242,14))
    i = 0
    j = 0
    while i < 242:
        total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
        total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
        total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
        total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
        total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
        total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
        total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
        total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
        j=j+1
        i=i+2
    train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
    train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
    train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
    train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
    #print train_seq_rf_lslv
    final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
    final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
    final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
    final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
    model_rf_lslv.baumWelch(final_ts_rf_lslv)
    model_rm_lslv.baumWelch(final_ts_rm_lslv)
    model_sf_lslv.baumWelch(final_ts_sf_lslv)
    model_sm_lslv.baumWelch(final_ts_sm_lslv)
    # For Testing
    # Held-out LSLV trials (42 columns), force/motion interleaved.
    total_seq_obj_lslv = np.zeros((242,42))
    total_seq_obj_force_lslv = Fmat_lslv[0:121,:]
    total_seq_obj_motion_lslv = Fmat_lslv[242:363,:]
    i = 0
    j = 0
    while i < 242:
        total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
        total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
        j=j+1
        i=i+2
    rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
    rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
    sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
    sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
    k = 0
    while (k < np.size(total_seq_obj_lslv,1)):
        test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
        new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
        #print new_test_seq_obj_lslv
        ts_obj_lslv = new_test_seq_obj_lslv
        #print np.shape(ts_obj_lslv)
        # Find Viterbi Path
        final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
        path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
        path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
        path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
        path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
        # Highest Viterbi log-likelihood wins.
        obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
        if obj_lslv == path_rf_obj_lslv[1]:
            rf_lslv[0,k] = 1
        elif obj_lslv == path_rm_obj_lslv[1]:
            rm_lslv[0,k] = 1
        elif obj_lslv == path_sf_obj_lslv[1]:
            sf_lslv[0,k] = 1
        else:
            sm_lslv[0,k] = 1
        k = k+1
    #print rf_lslv.T
    # True-class column blocks for LSLV: RF 0:15, RM 15:28, SF 28:36, SM 36:42.
    cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
    cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
    cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:36])
    cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,36:42])
    cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
    cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
    cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:36])
    cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,36:42])
    cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
    cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
    cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:36])
    cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,36:42])
    cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
    cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
    cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:36])
    cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,36:42])
    #print cmat
############################################################################################################################################
# Plot Confusion Matrix
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_motion_20_states.png')
pp.show()
| mit |
LukeC92/iris | lib/iris/tests/unit/quickplot/test_points.py | 11 | 2168 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.quickplot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    """Check that qplt.points gives a string-valued coordinate bounds-style
    tick labels on whichever axis it is plotted against."""
    def test_yaxis_labels(self):
        qplt.points(self.cube, coords=('bar', 'str_coord'))
        self.assertBoundsTickLabels('yaxis')
    def test_xaxis_labels(self):
        qplt.points(self.cube, coords=('str_coord', 'bar'))
        self.assertBoundsTickLabels('xaxis')
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
    """Run the shared MixinCoords checks against qplt.points."""
    def setUp(self):
        # We have a 2d cube with dimensionality (bar: 3; foo: 4)
        self.cube = simple_2d(with_bounds=False)
        self.foo = self.cube.coord('foo').points
        self.foo_index = np.arange(self.foo.size)
        self.bar = self.cube.coord('bar').points
        self.bar_index = np.arange(self.bar.size)
        # qplt.points draws through pyplot.scatter; patch it so MixinCoords
        # can inspect the call arguments instead of rendering anything.
        self.data = None
        self.dataT = None
        self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
        self.draw_func = qplt.points
if __name__ == "__main__":
    # Allow this test module to be executed directly as a script.
    tests.main()
| lgpl-3.0 |
IssamLaradji/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 31 | 3340 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Tests the FastMCD algorithm implementation
    Each call is launch_mcd_on_dataset(n_samples, n_features, n_outliers,
    tol_loc, tol_cov, tol_support).
    """
    ### Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    ### Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
    ### Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
    ### 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on gaussian data with n_outliers shifted points and
    check the location/covariance errors and support size vs tolerances."""
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # offsets are +/-5 per feature (10 * (0 or 1 - 0.5))
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # the stored dist_ must match mahalanobis() on the training data
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    """Regression test for gh-1127: MinCovDet must not break when
    X.shape == (3, 1), i.e. n_support == n_samples."""
    rng = np.random.RandomState(0)
    tiny_X = rng.normal(size=(3, 1))
    MinCovDet().fit(tiny_X)
def test_outlier_detection():
    """EllipticEnvelope must refuse to predict before fit, and after fit its
    decision values must be consistent with mahalanobis()/dist_/score()."""
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    print(clf.threshold)
    # not fitted yet: both calls must raise
    assert_raises(Exception, clf.predict, X)
    assert_raises(Exception, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # raw decision values are the Mahalanobis distances
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # a negative transformed decision value marks an outlier prediction
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
glennq/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Count the columns of X that hold at least one non-zero entry."""
    _, col_indices = X.nonzero()
    return np.unique(col_indices).size
def tokens(doc):
    """Lazily yield the lower-cased word tokens of *doc*.
    A simple ``\\w+`` regex splits the text; for a more principled
    approach, see CountVectorizer or TfidfVectorizer.
    """
    for match in re.finditer(r"\w+", doc):
        yield match.group().lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies.
    Uses collections.Counter, which counts at C speed and -- like the
    hand-rolled defaultdict(int) loop it replaces -- returns 0 for keys
    that were never seen, so callers observe the same mapping behaviour.
    """
    from collections import Counter
    return Counter(tokens(doc))
# Newsgroup categories used for the (smaller, faster) default corpus.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()
# Optional CLI argument: hash-space size for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
# Benchmark 1: DictVectorizer on per-document frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
# Benchmark 3: FeatureHasher counting raw token streams itself.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/parser/test_index_col.py | 2 | 5308 | """
Tests that the specified index column (a.k.a "index_col")
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
from io import StringIO
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas.util.testing as tm
@pytest.mark.parametrize("with_header", [True, False])
def test_index_col_named(all_parsers, with_header):
    """index_col given by column *name* works when a header row supplies the
    name, and raises a clear error when it does not (no header)."""
    parser = all_parsers
    no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""  # noqa
    header = (
        "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
    )  # noqa
    if with_header:
        data = header + no_header
        result = parser.read_csv(StringIO(data), index_col="ID")
        expected = parser.read_csv(StringIO(data), header=0).set_index("ID")
        tm.assert_frame_equal(result, expected)
    else:
        data = no_header
        msg = "Index ID invalid"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), index_col="ID")
def test_index_col_named2(all_parsers):
    """index_col may be a single-element list naming one of *names*."""
    parser = all_parsers
    data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
    expected = DataFrame(
        {"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},
        index=Index(["hello", "world", "foo"], name="message"),
    )
    names = ["a", "b", "c", "d", "message"]
    result = parser.read_csv(StringIO(data), names=names, index_col=["message"])
    tm.assert_frame_equal(result, expected)
def test_index_col_is_true(all_parsers):
    """Passing index_col=True must raise instead of being treated as 1.
    See gh-9798."""
    parser = all_parsers
    data = "a,b\n1,2"
    expected_msg = "The value of index_col couldn't be 'True'"
    with pytest.raises(ValueError, match=expected_msg):
        parser.read_csv(StringIO(data), index_col=True)
def test_infer_index_col(all_parsers):
    """A header one field shorter than the data rows makes read_csv infer
    the leading unlabeled column as the index."""
    data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data))
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        index=["foo", "bar", "baz"],
        columns=["A", "B", "C"],
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "index_col,kwargs",
    [
        (None, dict(columns=["x", "y", "z"])),
        (False, dict(columns=["x", "y", "z"])),
        (0, dict(columns=["y", "z"], index=Index([], name="x"))),
        (1, dict(columns=["x", "z"], index=Index([], name="y"))),
        ("x", dict(columns=["y", "z"], index=Index([], name="x"))),
        ("y", dict(columns=["x", "z"], index=Index([], name="y"))),
        (
            [0, 1],
            dict(
                columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
            ),
        ),
        (
            ["x", "y"],
            dict(
                columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
            ),
        ),
        (
            [1, 0],
            dict(
                columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
            ),
        ),
        (
            ["y", "x"],
            dict(
                columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
            ),
        ),
    ],
)
def test_index_col_empty_data(all_parsers, index_col, kwargs):
    """Header-only CSV: every index_col flavour (position, name, list, in
    either order) must yield an empty frame with the right (Multi)Index."""
    data = "x,y,z"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=index_col)
    expected = DataFrame(**kwargs)
    tm.assert_frame_equal(result, expected)
def test_empty_with_index_col_false(all_parsers):
    """A header-only file with index_col=False keeps every column as data.
    See gh-10413."""
    parser = all_parsers
    data = "x,y"
    expected = DataFrame(columns=["x", "y"])
    result = parser.read_csv(StringIO(data), index_col=False)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "index_names",
    [
        ["", ""],
        ["foo", ""],
        ["", "bar"],
        ["foo", "bar"],
        ["NotReallyUnnamed", "Unnamed: 0"],
    ],
)
def test_multi_index_naming(all_parsers, index_names):
    """MultiIndex level names come straight from the header row; an empty
    header cell must map to None, not an "Unnamed: N" placeholder."""
    parser = all_parsers
    # We don't want empty index names being replaced with "Unnamed: 0"
    data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])
    result = parser.read_csv(StringIO(data), index_col=[0, 1])
    expected = DataFrame(
        {"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])
    )
    expected.index.names = [name if name else None for name in index_names]
    tm.assert_frame_equal(result, expected)
def test_multi_index_naming_not_all_at_beginning(all_parsers):
    """Index columns need not be contiguous: columns 0 and 2 form the
    MultiIndex here while column 1 ("Unnamed: 2") stays a data column."""
    parser = all_parsers
    data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"
    result = parser.read_csv(StringIO(data), index_col=[0, 2])
    expected = DataFrame(
        {"Unnamed: 2": ["c", "d", "c", "d"]},
        index=MultiIndex(
            levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]
        ),
    )
    tm.assert_frame_equal(result, expected)
| apache-2.0 |
liberatorqjw/scikit-learn | sklearn/semi_supervised/label_propagation.py | 15 | 15050 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.
    Subclasses provide ``_build_graph``; ``fit`` then iterates
    ``label_distributions_ <- graph @ label_distributions_`` with clamping
    until convergence (see ``_not_converged``) or ``max_iter``.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..
    gamma : float
        Parameter for rbf kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol
        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors
        # clamping factor
        self.alpha = alpha
    def _get_kernel(self, X, y=None):
        # 'rbf': dense affinity between X and itself (y is None) or X and y.
        # 'knn': sparse connectivity graph over X (y is None) or the
        # neighbor indices of y within the fitted X; the NearestNeighbors
        # object is fitted lazily and cached on self.nn_fit.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)
    @abstractmethod
    def _build_graph(self):
        # Subclasses must return the (possibly sparse) propagation matrix.
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")
    def predict(self, X):
        """Performs inductive inference across the model.
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()
    def predict_proba(self, X):
        """Predict probability for each possible outcome.
        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # _get_kernel returned neighbor indices per query point; sum the
            # label distributions of each point's neighbors.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # row-normalize so each row sums to 1
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        y = np.asarray(y)
        unlabeled = y == -1
        # unlabeled points get weight alpha in the clamping step; labeled
        # points keep weight 1 (hard clamp when alpha == 1)
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha
        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1
        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # iterate: propagate, then clamp toward the static seed labels
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        clamping factor
    max_iter : float
        change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)
    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample
        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): dividing the flat .data array by np.diag(...) of
            # the column-sum matrix only lines up when every row stores the
            # same number of entries -- verify against the dense branch below.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propgation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        clamping factor
    max_iter : float
        maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)
    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # this one has different base parameters (soft clamp: alpha=0.2)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # propagate along -L with a zeroed diagonal
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
rseubert/scikit-learn | sklearn/feature_extraction/image.py | 32 | 17167 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph
    Builds the symmetric adjacency matrix of the grid: off-diagonal entries
    are gradient weights when ``img`` is given (else 1), diagonal entries
    are the voxel intensities (else 1), optionally restricted by ``mask``.
    """
    edges = _make_edges_3d(n_x, n_y, n_z)
    if dtype is None:
        if img is None:
            dtype = np.int
        else:
            dtype = img.dtype
    if img is not None:
        img = np.atleast_3d(img)
        weights = _compute_gradient_3d(edges, img)
        if mask is not None:
            edges, weights = _mask_edges_weights(mask, edges, weights)
            diag = img.squeeze()[mask]
        else:
            diag = img.ravel()
        n_voxels = diag.size
    else:
        if mask is not None:
            # NOTE(review): the two consecutive bool conversions below look
            # redundant -- one of them should suffice.
            mask = astype(mask, dtype=np.bool, copy=False)
            mask = np.asarray(mask, dtype=np.bool)
            edges = _mask_edges_weights(mask, edges)
            n_voxels = np.sum(mask)
        else:
            n_voxels = n_x * n_y * n_z
        weights = np.ones(edges.shape[1], dtype=dtype)
        diag = np.ones(n_voxels, dtype=dtype)
    diag_idx = np.arange(n_voxels)
    # duplicate each edge in both directions to make the matrix symmetric
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                              (np.hstack((i_idx, diag_idx)),
                               np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """Graph of the pixel-to-pixel gradient connections
    Edges are weighted with the gradient values.
    Parameters
    ===========
    img: ndarray, 2D or 3D
        2D or 3D image
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as: np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype: None or dtype, optional
        The data of the returned sparse matrix. By default it is the
        dtype of img
    Notes
    =====
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance. Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # Promote to 3D so a 2D image becomes a single-slice volume.
    volume = np.atleast_3d(img)
    n_x, n_y, n_z = volume.shape
    return _to_graph(n_x, n_y, n_z, mask, volume, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
                  dtype=np.int):
    """Graph of the pixel-to-pixel connections

    Edges exist if 2 voxels are connected (no image data is involved, so
    all edge weights are 1).

    Parameters
    ===========
    n_x: int
        Dimension in x axis
    n_y: int
        Dimension in y axis
    n_z: int, optional, default 1
        Dimension in z axis
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as: np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype: dtype, optional, default int
        The data of the returned sparse matrix. By default it is int

    Notes
    =====
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.

    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # img is omitted: _to_graph then builds a uniform-weight grid graph.
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
                     dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Parameters
===========
i_h: int
The image height
i_w: int
The image with
p_h: int
The height of a patch
p_w: int
The width of a patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
    """Extracts patches of any n-dimensional array in place using strides.

    Given an n-dimensional array it will return a 2n-dimensional array with
    the first n dimensions indexing patch position and the last n indexing
    the patch content. This operation is immediate (O(1)). A reshape
    performed on the first n dimensions will cause numpy to copy data, leading
    to a list of extracted patches.

    Parameters
    ----------
    arr: ndarray
        n-dimensional array of which patches are to be extracted

    patch_shape: integer or tuple of length arr.ndim
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of
        sidelength given by its value.

    extraction_step: integer or tuple of length arr.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    patches: strided ndarray
        2n-dimensional array indexing patches on first n dimensions and
        containing patches on the last n dimensions. These dimensions
        are fake, but this way no data is copied. A simple reshape invokes
        a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
    """
    arr_ndim = arr.ndim

    # Broadcast scalar shape/step to one entry per array dimension.
    if isinstance(patch_shape, numbers.Number):
        patch_shape = tuple([patch_shape] * arr_ndim)
    if isinstance(extraction_step, numbers.Number):
        extraction_step = tuple([extraction_step] * arr_ndim)

    patch_strides = arr.strides

    # Bug fix: index with a *tuple* of slices; indexing with a plain list
    # of slices is deprecated and rejected by modern NumPy.
    slices = tuple(slice(None, None, st) for st in extraction_step)
    indexing_strides = arr[slices].strides

    # Number of patch positions along each axis, given the step.
    patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
                           np.array(extraction_step)) + 1

    shape = tuple(list(patch_indices_shape) + list(patch_shape))
    strides = tuple(list(indexing_strides) + list(patch_strides))

    # Zero-copy view: positions on the first n axes, content on the last n.
    patches = as_strided(arr, shape=shape, strides=strides)
    return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
    """Reshape a 2D image into a collection of patches

    The resulting patches are allocated in a dedicated array.

    Parameters
    ----------
    image: array, shape = (image_height, image_width) or
        (image_height, image_width, n_channels)
        The original image data. For color images, the last dimension specifies
        the channel: a RGB image would have `n_channels=3`.

    patch_size: tuple of ints (patch_height, patch_width)
        the dimensions of one patch

    max_patches: integer or float, optional default is None
        The maximum number of patches to extract. If max_patches is a float
        between 0 and 1, it is taken to be a proportion of the total number
        of patches.

    random_state: int or RandomState
        Pseudo number generator state used for random sampling to use if
        `max_patches` is not None.

    Returns
    -------
    patches: array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The collection of patches extracted from the image, where `n_patches`
        is either `max_patches` or the total number of patches that can be
        extracted.

    Examples
    --------

    >>> from sklearn.feature_extraction import image
    >>> one_image = np.arange(16).reshape((4, 4))
    >>> one_image
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> print(patches.shape)
    (9, 2, 2)
    >>> patches[0]
    array([[0, 1],
           [4, 5]])
    >>> patches[1]
    array([[1, 2],
           [5, 6]])
    >>> patches[8]
    array([[10, 11],
           [14, 15]])
    """
    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size

    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")

    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")

    image = check_array(image, allow_nd=True)
    # Normalize to 3D so gray-level and color images share one code path.
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]

    # Zero-copy strided view over every possible patch position.
    extracted_patches = extract_patches(image,
                                        patch_shape=(p_h, p_w, n_colors),
                                        extraction_step=1)

    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
    if max_patches:
        # Draw random top-left corners.  i_s is drawn before j_s, so the
        # output for a given random_state depends on this call order.
        rng = check_random_state(random_state)
        i_s = rng.randint(i_h - p_h + 1, size=n_patches)
        j_s = rng.randint(i_w - p_w + 1, size=n_patches)
        patches = extracted_patches[i_s, j_s, 0]
    else:
        patches = extracted_patches

    # Materialize (copy) the selected patches out of the strided view.
    patches = patches.reshape(-1, p_h, p_w, n_colors)
    # remove the color dimension if useless
    if patches.shape[-1] == 1:
        return patches.reshape((n_patches, p_h, p_w))
    else:
        return patches
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.

    Parameters
    ----------
    patches: array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.

    image_size: tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed

    Returns
    -------
    image: array, shape = image_size
        the reconstructed image
    """
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # Number of patch positions along each axis (stride-1 extraction).
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    # First pass: add every patch at its position, accumulating overlaps.
    positions = product(range(n_h), range(n_w))
    for patch, (row, col) in zip(patches, positions):
        img[row:row + p_h, col:col + p_w] += patch
    # Second pass: divide each pixel by the number of patches covering it.
    # XXX: is this the most efficient way? memory-wise yes, cpu wise?
    for row in range(i_h):
        for col in range(i_w):
            overlap = (min(row + 1, p_h, i_h - row) *
                       min(col + 1, p_w, i_w - col))
            img[row, col] /= float(overlap)
    return img
class PatchExtractor(BaseEstimator):
    """Extracts patches from a collection of images

    Parameters
    ----------
    patch_size: tuple of ints (patch_height, patch_width), optional
        the dimensions of one patch; when None, one tenth of the image
        height and width is used

    max_patches: integer or float, optional default is None
        The maximum number of patches per image to extract. If max_patches is a
        float in (0, 1), it is taken to mean a proportion of the total number
        of patches.

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    """
    def __init__(self, patch_size=None, max_patches=None, random_state=None):
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.random_state = random_state

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        return self

    def transform(self, X):
        """Transforms the image samples in X into a matrix of patch data.

        Parameters
        ----------
        X : array, shape = (n_samples, image_height, image_width) or
            (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.

        Returns
        -------
        patches: array, shape = (n_patches, patch_height, patch_width) or
             (n_patches, patch_height, patch_width, n_channels)
             The collection of patches extracted from the images, where
             `n_patches` is either `n_samples * max_patches` or the total
             number of patches that can be extracted.
        """
        # NOTE(review): this overwrites the ``random_state`` constructor
        # parameter with a RandomState instance on first use -- confirm
        # the mutation is intended (it changes what repeated calls draw).
        self.random_state = check_random_state(self.random_state)
        n_images, i_h, i_w = X.shape[:3]
        # Normalize to 4D so gray-level and color images share one path.
        X = np.reshape(X, (n_images, i_h, i_w, -1))
        n_channels = X.shape[-1]
        if self.patch_size is None:
            patch_size = i_h // 10, i_w // 10
        else:
            patch_size = self.patch_size

        # compute the dimensions of the patches array
        p_h, p_w = patch_size
        n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        patches_shape = (n_images * n_patches,) + patch_size
        if n_channels > 1:
            patches_shape += (n_channels,)

        # extract the patches: each image contributes a contiguous slab of
        # n_patches rows in the output array
        patches = np.empty(patches_shape)
        for ii, image in enumerate(X):
            patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
                image, patch_size, self.max_patches, self.random_state)
        return patches
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tests/test_common.py | 1 | 35517 | # -*- coding: utf-8 -*-
import collections
from datetime import datetime
import re
import sys
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT, NaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull, array_equivalent
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
    """_mut_exclusive must reject two set values and return the one provided."""
    msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
    with tm.assertRaisesRegexp(TypeError, msg):
        com._mut_exclusive(a=1, b=2)
    assert com._mut_exclusive(a=1, b=None) == 1
    assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
    """is_sequence: containers yes; strings, unicode and types no."""
    is_seq = com.is_sequence
    assert(is_seq((1, 2)))
    assert(is_seq([1, 2]))
    assert(not is_seq("abcd"))
    assert(not is_seq(u("abcd")))
    assert(not is_seq(np.int64))

    class A(object):
        # A bare __getitem__ (wrong arity, no __iter__/__len__) must not
        # make an object look like a sequence.
        def __getitem__(self):
            return 1

    assert(not is_seq(A()))
def test_get_callable_name():
    """_get_callable_name should unwrap partials and name callables sensibly."""
    from functools import partial
    getname = com._get_callable_name

    def fn(x):
        return x
    lambda_ = lambda x: x
    part1 = partial(fn)
    part2 = partial(part1)

    class somecall(object):
        def __call__(self):
            # Bug fix: the body used to ``return x`` with ``x`` undefined,
            # a latent NameError if the instance were ever called.  The
            # test only needs a callable instance, so return a constant.
            return 1
    # partials are unwrapped recursively down to the wrapped function
    assert getname(fn) == 'fn'
    assert getname(lambda_)
    assert getname(part1) == 'fn'
    assert getname(part2) == 'fn'
    # callable instances are named after their class
    assert getname(somecall()) == 'somecall'
    # non-callables have no name
    assert getname(1) is None
def test_notnull():
    """notnull on scalars/arrays, incl. the mode.use_inf_as_null option."""
    assert notnull(1.)
    assert not notnull(None)
    assert not notnull(np.NaN)

    # Default: infinities count as valid (non-null) values.
    with cf.option_context("mode.use_inf_as_null", False):
        assert notnull(np.inf)
        assert notnull(-np.inf)

        arr = np.array([1.5, np.inf, 3.5, -np.inf])
        result = notnull(arr)
        assert result.all()

    # With the option on, +/-inf are treated as missing.
    with cf.option_context("mode.use_inf_as_null", True):
        assert not notnull(np.inf)
        assert not notnull(-np.inf)

        arr = np.array([1.5, np.inf, 3.5, -np.inf])
        result = notnull(arr)
        assert result.sum() == 2

    # isnull on a Series always returns a Series, whatever the dtype.
    with cf.option_context("mode.use_inf_as_null", False):
        for s in [tm.makeFloatSeries(),tm.makeStringSeries(),
                  tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:
            assert(isinstance(isnull(s), Series))
def test_isnull():
    """isnull over scalars, Series, DataFrame, Panel and Panel4D."""
    assert not isnull(1.)
    assert isnull(None)
    assert isnull(np.NaN)
    # infinities are not null by default
    assert not isnull(np.inf)
    assert not isnull(-np.inf)

    # series: result type is preserved
    for s in [tm.makeFloatSeries(),tm.makeStringSeries(),
              tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:
        assert(isinstance(isnull(s), Series))

    # frame: whole-frame isnull must match a column-wise apply
    for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]:
        result = isnull(df)
        expected = df.apply(isnull)
        tm.assert_frame_equal(result, expected)

    # panel
    for p in [ tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel()) ]:
        result = isnull(p)
        expected = p.apply(isnull)
        tm.assert_panel_equal(result, expected)

    # panel 4d
    for p in [ tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D()) ]:
        result = isnull(p)
        expected = p.apply(isnull)
        tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
    """isnull on plain (nested) lists and lists of strings."""
    # nested lists keep their shape in the boolean result
    result = isnull([[False]])
    exp = np.array([[False]])
    assert(np.array_equal(result, exp))

    result = isnull([[1], [2]])
    exp = np.array([[False], [False]])
    assert(np.array_equal(result, exp))

    # list of strings / unicode
    result = isnull(['foo', 'bar'])
    assert(not result.any())

    result = isnull([u('foo'), u('bar')])
    assert(not result.any())
def test_isnull_nat():
    """NaT is detected as null both in lists and object arrays."""
    result = isnull([NaT])
    exp = np.array([True])
    assert(np.array_equal(result, exp))

    result = isnull(np.array([NaT], dtype=object))
    exp = np.array([True])
    assert(np.array_equal(result, exp))
def test_isnull_datetime():
    """isnull/notnull on datetimes, DatetimeIndex and PeriodIndex."""
    assert (not isnull(datetime.now()))
    assert notnull(datetime.now())

    idx = date_range('1/1/1990', periods=20)
    assert(notnull(idx).all())

    # injecting iNaT into the underlying i8 data makes position 0 null
    idx = np.asarray(idx)
    idx[0] = iNaT
    idx = DatetimeIndex(idx)
    mask = isnull(idx)
    assert(mask[0])
    assert(not mask[1:].any())

    # GH 9129
    pidx = idx.to_period(freq='M')
    mask = isnull(pidx)
    assert(mask[0])
    assert(not mask[1:].any())

    mask = isnull(pidx[1:])
    assert(not mask.any())
class TestIsNull(tm.TestCase):
    """isnull edge cases that need the unittest-style assertions."""

    def test_0d_array(self):
        """isnull on 0-dimensional arrays, native and object dtype."""
        self.assertTrue(isnull(np.array(np.nan)))
        self.assertFalse(isnull(np.array(0.0)))
        self.assertFalse(isnull(np.array(0)))
        # test object dtype
        self.assertTrue(isnull(np.array(np.nan, dtype=object)))
        self.assertFalse(isnull(np.array(0.0, dtype=object)))
        self.assertFalse(isnull(np.array(0, dtype=object)))
def test_downcast_conv():
    """_possibly_downcast_to_dtype: float->int inference and empty arrays."""
    # test downcasting: values not exactly integral stay float
    arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
    result = com._possibly_downcast_to_dtype(arr, 'infer')
    assert (np.array_equal(result, arr))

    # values within float tolerance of integers downcast to int
    arr = np.array([8., 8., 8., 8., 8.9999999999995])
    result = com._possibly_downcast_to_dtype(arr, 'infer')
    expected = np.array([8, 8, 8, 8, 9])
    assert (np.array_equal(result, expected))

    arr = np.array([8., 8., 8., 8., 9.0000000000005])
    result = com._possibly_downcast_to_dtype(arr, 'infer')
    expected = np.array([8, 8, 8, 8, 9])
    assert (np.array_equal(result, expected))

    # conversions: exact integral floats downcast regardless of input dtype
    expected = np.array([1,2])
    for dtype in [np.float64,object,np.int64]:
        arr = np.array([1.0,2.0],dtype=dtype)
        result = com._possibly_downcast_to_dtype(arr,'infer')
        tm.assert_almost_equal(result, expected)

    # a NaN blocks the downcast to int
    expected = np.array([1.0,2.0,np.nan])
    for dtype in [np.float64,object]:
        arr = np.array([1.0,2.0,np.nan],dtype=dtype)
        result = com._possibly_downcast_to_dtype(arr,'infer')
        tm.assert_almost_equal(result, expected)

    # empties: any empty input converts to an empty int64 array
    for dtype in [np.int32,np.float64,np.float32,np.bool_,np.int64,object]:
        arr = np.array([],dtype=dtype)
        result = com._possibly_downcast_to_dtype(arr,'int64')
        tm.assert_almost_equal(result, np.array([],dtype=np.int64))
        assert result.dtype == np.int64
def test_array_equivalent():
    """array_equivalent treats NaNs in matching positions as equal."""
    assert array_equivalent(np.array([np.nan, np.nan]),
                            np.array([np.nan, np.nan]))
    assert array_equivalent(np.array([np.nan, 1, np.nan]),
                            np.array([np.nan, 1, np.nan]))
    assert array_equivalent(np.array([np.nan, None], dtype='object'),
                            np.array([np.nan, None], dtype='object'))
    assert array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),
                            np.array([np.nan, 1+1j], dtype='complex'))
    assert not array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),
                                np.array([np.nan, 1+2j], dtype='complex'))
    assert not array_equivalent(np.array([np.nan, 1, np.nan]),
                                np.array([np.nan, 2, np.nan]))
    # different shapes are never equivalent
    assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
    # also works on pandas Index types
    assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan]))
    assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan]))
    assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]))
    assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
def test_datetimeindex_from_empty_datetime64_array():
    """Empty datetime64 arrays of any resolution build an empty index."""
    for unit in [ 'ms', 'us', 'ns' ]:
        idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
        assert(len(idx) == 0)
def test_nan_to_nat_conversions():
    """Assigning np.nan into datetime64 data must store iNaT/NaT."""
    df = DataFrame(dict({
        'A' : np.asarray(lrange(10),dtype='float64'),
        'B' : Timestamp('20010101') }))
    df.iloc[3:6,:] = np.nan
    # the datetime column stores the sentinel iNaT, not a float NaN
    result = df.loc[4,'B'].value
    assert(result == iNaT)

    # setting through the block manager must behave the same way
    s = df['B'].copy()
    s._data = s._data.setitem(indexer=tuple([slice(8,9)]),value=np.nan)
    assert(isnull(s[8]))

    # numpy < 1.7.0 is wrong
    from distutils.version import LooseVersion
    if LooseVersion(np.__version__) >= '1.7.0':
        assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
    """_any_none is True iff at least one argument is None."""
    assert(com._any_none(1, 2, 3, None))
    assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
    """_all_not_none is True iff no argument is None."""
    assert(com._all_not_none(1, 2, 3, 4))
    assert(not com._all_not_none(1, 2, 3, None))
    assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
    """pprint_thing on binary text honours the quote_strings flag."""
    import string
    letters = string.ascii_letters
    btype = compat.binary_type
    # Python 3 bytes() needs an encoding; Python 2 str() does not.
    try:
        raw = btype(letters, encoding=cf.get_option('display.encoding'))
    except TypeError:
        raw = btype(letters)
    b = compat.text_type(compat.bytes_to_str(raw))
    res = com.pprint_thing(b, quote_strings=True)
    assert_equal(res, repr(b))
    res = com.pprint_thing(b, quote_strings=False)
    assert_equal(res, b)
def test_adjoin():
    """adjoin joins columns of strings side by side with a given pad."""
    data = [['a', 'b', 'c'],
            ['dd', 'ee', 'ff'],
            ['ggg', 'hhh', 'iii']]
    expected = 'a  dd  ggg\nb  ee  hhh\nc  ff  iii'
    adjoined = com.adjoin(2, *data)
    assert(adjoined == expected)
def test_iterpairs():
    """iterpairs yields consecutive overlapping pairs of a sequence."""
    data = [1, 2, 3, 4]
    expected = [(1, 2),
                (2, 3),
                (3, 4)]
    result = list(com.iterpairs(data))
    assert(result == expected)
def test_split_ranges():
    """split_ranges must cover exactly the True runs of any boolean mask."""
    def _bin(x, width):
        "return int(x) as a base2 string of given width"
        return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))

    def test_locs(mask):
        nfalse = sum(np.array(mask) == 0)

        remaining = 0
        for s, e in com.split_ranges(mask):
            remaining += e - s

            # each returned range must contain only True entries
            assert 0 not in mask[s:e]

        # make sure the total items covered by the ranges are a complete cover
        assert remaining + nfalse == len(mask)

    # exhaustively test all possible mask sequences of length 8
    ncols = 8
    for i in range(2 ** ncols):
        cols = lmap(int, list(_bin(i, ncols)))  # count up in base2
        mask = [cols[i] == 1 for i in range(len(cols))]
        test_locs(mask)

    # base cases
    test_locs([])
    test_locs([0])
    test_locs([1])
def test_indent():
    """indent prefixes every line with the requested number of spaces."""
    s = 'a b c\nd e f'
    result = com.indent(s, spaces=6)
    assert(result == '      a b c\n      d e f')
def test_banner():
    """banner wraps the message between two 80-char '=' rules."""
    ban = com.banner('hi')
    assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
    """map_indices_py maps each value to its position in the sequence."""
    data = [4, 3, 2, 1]
    expected = {4: 0, 3: 1, 2: 2, 1: 3}
    result = com.map_indices_py(data)
    assert(result == expected)
def test_union():
    """union of disjoint lists contains every element of both."""
    a = [1, 2, 3]
    b = [4, 5, 6]
    union = sorted(com.union(a, b))
    # a and b are disjoint and pre-sorted, so sorted union == a + b
    assert((a + b) == union)
def test_difference():
    """difference returns elements of the first iterable not in the second."""
    a = [1, 2, 3]
    b = [1, 2, 3, 4, 5, 6]
    inter = sorted(com.difference(b, a))
    assert([4, 5, 6] == inter)
def test_intersection():
    """intersection returns the elements common to both iterables."""
    a = [1, 2, 3]
    b = [1, 2, 3, 4, 5, 6]
    inter = sorted(com.intersection(a, b))
    assert(a == inter)
def test_groupby():
    """com.groupby buckets values by key function, preserving input order."""
    values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
    expected = {'f': ['foo', 'foo3'],
                'b': ['bar', 'baz', 'baz2'],
                'q': ['qux']}
    grouped = com.groupby(values, lambda x: x[0])

    for k, v in grouped:
        assert v == expected[k]
def test_is_list_like():
    """is_list_like: containers and Series yes; scalars and strings no."""
    passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
              Series([]), Series(['a']).str)
    fails = (1, '2', object())

    for p in passes:
        assert com.is_list_like(p)

    for f in fails:
        assert not com.is_list_like(f)
def test_is_hashable():
    """is_hashable must actually try hash(), not just check the ABC."""
    # all new-style classes are hashable by default
    class HashableClass(object):
        pass

    class UnhashableClass1(object):
        __hash__ = None

    class UnhashableClass2(object):
        def __hash__(self):
            raise TypeError("Not hashable")

    hashable = (
        1, 3.14, np.float64(3.14), 'a', tuple(), (1,), HashableClass(),
    )
    not_hashable = (
        [], UnhashableClass1(),
    )
    # these satisfy collections.Hashable but raise when actually hashed
    abc_hashable_not_really_hashable = (
        ([],), UnhashableClass2(),
    )

    for i in hashable:
        assert com.is_hashable(i)
    for i in not_hashable:
        assert not com.is_hashable(i)
    for i in abc_hashable_not_really_hashable:
        assert not com.is_hashable(i)

    # numpy.array is no longer collections.Hashable as of
    # https://github.com/numpy/numpy/pull/5326, just test
    # pandas.common.is_hashable()
    assert not com.is_hashable(np.array([]))

    # old-style classes in Python 2 don't appear hashable to
    # collections.Hashable but also seem to support hash() by default
    if sys.version_info[0] == 2:
        class OldStyleClass():
            pass
        c = OldStyleClass()
        assert not isinstance(c, collections.Hashable)
        assert com.is_hashable(c)
        hash(c)  # this will not raise
def test_ensure_int32():
    """_ensure_int32 passes int32 through and downcasts int64."""
    values = np.arange(10, dtype=np.int32)
    result = com._ensure_int32(values)
    assert(result.dtype == np.int32)

    values = np.arange(10, dtype=np.int64)
    result = com._ensure_int32(values)
    assert(result.dtype == np.int32)
def test_ensure_platform_int():
    """_ensure_platform_int converts index values to the native int type."""
    # verify that when we create certain types of indices
    # they remain the correct type under platform conversions
    from pandas.core.index import Int64Index

    # int64
    x = Int64Index([1, 2, 3], dtype='int64')
    assert(x.dtype == np.int64)

    pi = com._ensure_platform_int(x)
    assert(pi.dtype == np.int_)

    # int32
    x = Int64Index([1, 2, 3], dtype='int32')
    assert(x.dtype == np.int32)

    pi = com._ensure_platform_int(x)
    assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
    """is_re accepts only compiled regex pattern objects."""
    passes = re.compile('ad'),
    fails = 'x', 2, 3, object()

    for p in passes:
        assert com.is_re(p)

    for f in fails:
        assert not com.is_re(f)
def test_is_recompilable():
    """is_re_compilable: strings and patterns yes; non-string objects no."""
    passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
              u(r'\u2233\s*'), re.compile(r''))
    fails = 1, [], object()

    for p in passes:
        assert com.is_re_compilable(p)

    for f in fails:
        assert not com.is_re_compilable(f)
def test_random_state():
    """_random_state: seed int, RandomState passthrough, None, and errors."""
    import numpy.random as npr
    # Check with seed
    state = com._random_state(5)
    assert_equal(state.uniform(), npr.RandomState(5).uniform())

    # Check with random state object
    state2 = npr.RandomState(10)
    assert_equal(com._random_state(state2).uniform(), npr.RandomState(10).uniform())

    # check with no arg random state
    assert isinstance(com._random_state(), npr.RandomState)

    # Error for floats or strings
    with tm.assertRaises(ValueError):
        com._random_state('test')

    with tm.assertRaises(ValueError):
        com._random_state(5.5)
def test_maybe_match_name():
    """_maybe_match_name: equal names match, conflicts/missing yield None."""
    matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='x'))
    assert(matched == 'x')

    matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='y'))
    assert(matched is None)

    matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
    assert(matched is None)

    matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
    assert(matched is None)

    # a non-Series operand contributes no name, so the other side wins
    matched = com._maybe_match_name(Series([1], name='x'), [2])
    assert(matched == 'x')

    matched = com._maybe_match_name([1], Series([2], name='y'))
    assert(matched == 'y')
class TestTake(tm.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
    def test_1d_with_out(self):
        """take_1d into a preallocated ``out`` buffer, for every dtype.

        Dtypes that cannot hold NaN must raise on a -1 (fill) indexer.
        """
        def _test_dtype(dtype, can_hold_na):
            data = np.random.randint(0, 2, 4).astype(dtype)

            indexer = [2, 1, 0, 1]
            out = np.empty(4, dtype=dtype)
            com.take_1d(data, indexer, out=out)
            expected = data.take(indexer)
            tm.assert_almost_equal(out, expected)

            # -1 requests the fill value (NaN)
            indexer = [2, 1, 0, -1]
            out = np.empty(4, dtype=dtype)
            if can_hold_na:
                com.take_1d(data, indexer, out=out)
                expected = data.take(indexer)
                expected[3] = np.nan
                tm.assert_almost_equal(out, expected)
            else:
                with tm.assertRaisesRegexp(TypeError, self.fill_error):
                    com.take_1d(data, indexer, out=out)
                # no exception o/w
                data.take(indexer, out=out)

        _test_dtype(np.float64, True)
        _test_dtype(np.float32, True)
        _test_dtype(np.uint64, False)
        _test_dtype(np.uint32, False)
        _test_dtype(np.uint16, False)
        _test_dtype(np.uint8, False)
        _test_dtype(np.int64, False)
        _test_dtype(np.int32, False)
        _test_dtype(np.int16, False)
        _test_dtype(np.int8, False)
        _test_dtype(np.object_, True)
        _test_dtype(np.bool, False)
    def test_1d_fill_nonna(self):
        """take_1d with an explicit fill_value: dtype upcasting rules.

        The result dtype must be promoted just enough to hold both the
        data and the fill value; with no -1 in the indexer the original
        dtype is preserved.
        """
        def _test_dtype(dtype, fill_value, out_dtype):
            data = np.random.randint(0, 2, 4).astype(dtype)

            indexer = [2, 1, 0, -1]
            result = com.take_1d(data, indexer, fill_value=fill_value)
            assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
            assert(result[3] == fill_value)
            assert(result.dtype == out_dtype)

            # without -1 the fill value is unused and dtype is unchanged
            indexer = [2, 1, 0, 1]
            result = com.take_1d(data, indexer, fill_value=fill_value)
            assert((result[[0, 1, 2, 3]] == data[indexer]).all())
            assert(result.dtype == dtype)

        _test_dtype(np.int8, np.int16(127), np.int8)
        _test_dtype(np.int8, np.int16(128), np.int16)
        _test_dtype(np.int32, 1, np.int32)
        _test_dtype(np.int32, 2.0, np.float64)
        _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.int32, True, np.object_)
        _test_dtype(np.int32, '', np.object_)
        _test_dtype(np.float64, 1, np.float64)
        _test_dtype(np.float64, 2.0, np.float64)
        _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.float64, True, np.object_)
        _test_dtype(np.float64, '', np.object_)
        _test_dtype(np.complex128, 1, np.complex128)
        _test_dtype(np.complex128, 2.0, np.complex128)
        _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.complex128, True, np.object_)
        _test_dtype(np.complex128, '', np.object_)
        _test_dtype(np.bool_, 1, np.object_)
        _test_dtype(np.bool_, 2.0, np.object_)
        _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
        _test_dtype(np.bool_, True, np.bool_)
        _test_dtype(np.bool_, '', np.object_)
    def test_2d_with_out(self):
        """take_nd into preallocated buffers along both axes of a 2D array.

        Also exercised with read-only inputs, since take_nd has a typed
        memoryview fast path that only applies to writeable arrays.
        """
        def _test_dtype(dtype, can_hold_na, writeable=True):
            data = np.random.randint(0, 2, (5, 3)).astype(dtype)
            data.flags.writeable = writeable

            indexer = [2, 1, 0, 1]
            out0 = np.empty((4, 3), dtype=dtype)
            out1 = np.empty((5, 4), dtype=dtype)
            com.take_nd(data, indexer, out=out0, axis=0)
            com.take_nd(data, indexer, out=out1, axis=1)
            expected0 = data.take(indexer, axis=0)
            expected1 = data.take(indexer, axis=1)
            tm.assert_almost_equal(out0, expected0)
            tm.assert_almost_equal(out1, expected1)

            # -1 requests the fill value (NaN)
            indexer = [2, 1, 0, -1]
            out0 = np.empty((4, 3), dtype=dtype)
            out1 = np.empty((5, 4), dtype=dtype)
            if can_hold_na:
                com.take_nd(data, indexer, out=out0, axis=0)
                com.take_nd(data, indexer, out=out1, axis=1)
                expected0 = data.take(indexer, axis=0)
                expected1 = data.take(indexer, axis=1)
                expected0[3, :] = np.nan
                expected1[:, 3] = np.nan
                tm.assert_almost_equal(out0, expected0)
                tm.assert_almost_equal(out1, expected1)
            else:
                for i, out in enumerate([out0, out1]):
                    with tm.assertRaisesRegexp(TypeError, self.fill_error):
                        com.take_nd(data, indexer, out=out, axis=i)
                    # no exception o/w
                    data.take(indexer, out=out, axis=i)

        for writeable in [True, False]:
            # Check that take_nd works both with writeable arrays (in which
            # case fast typed memoryviews implementation) and read-only
            # arrays alike.
            _test_dtype(np.float64, True, writeable=writeable)
            _test_dtype(np.float32, True, writeable=writeable)
            _test_dtype(np.uint64, False, writeable=writeable)
            _test_dtype(np.uint32, False, writeable=writeable)
            _test_dtype(np.uint16, False, writeable=writeable)
            _test_dtype(np.uint8, False, writeable=writeable)
            _test_dtype(np.int64, False, writeable=writeable)
            _test_dtype(np.int32, False, writeable=writeable)
            _test_dtype(np.int16, False, writeable=writeable)
            _test_dtype(np.int8, False, writeable=writeable)
            _test_dtype(np.object_, True, writeable=writeable)
            _test_dtype(np.bool, False, writeable=writeable)
    def test_2d_fill_nonna(self):
        """take_nd with an explicit fill_value on 2D data: upcasting rules.

        Same dtype-promotion contract as the 1D case, verified on both
        axes; with no -1 in the indexer the original dtype is preserved.
        """
        def _test_dtype(dtype, fill_value, out_dtype):
            data = np.random.randint(0, 2, (5, 3)).astype(dtype)

            indexer = [2, 1, 0, -1]

            result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
            assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
            assert((result[3, :] == fill_value).all())
            assert(result.dtype == out_dtype)

            result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
            assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
            assert((result[:, 3] == fill_value).all())
            assert(result.dtype == out_dtype)

            # without -1 the fill value is unused and dtype is unchanged
            indexer = [2, 1, 0, 1]

            result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
            assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
            assert(result.dtype == dtype)

            result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
            assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
            assert(result.dtype == dtype)

        _test_dtype(np.int8, np.int16(127), np.int8)
        _test_dtype(np.int8, np.int16(128), np.int16)
        _test_dtype(np.int32, 1, np.int32)
        _test_dtype(np.int32, 2.0, np.float64)
        _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.int32, True, np.object_)
        _test_dtype(np.int32, '', np.object_)
        _test_dtype(np.float64, 1, np.float64)
        _test_dtype(np.float64, 2.0, np.float64)
        _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.float64, True, np.object_)
        _test_dtype(np.float64, '', np.object_)
        _test_dtype(np.complex128, 1, np.complex128)
        _test_dtype(np.complex128, 2.0, np.complex128)
        _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
        _test_dtype(np.complex128, True, np.object_)
        _test_dtype(np.complex128, '', np.object_)
        _test_dtype(np.bool_, 1, np.object_)
        _test_dtype(np.bool_, 2.0, np.object_)
        _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
        _test_dtype(np.bool_, True, np.bool_)
        _test_dtype(np.bool_, '', np.object_)
    def test_3d_with_out(self):
        """take_nd into preallocated buffers along all three axes of 3D data.

        Dtypes that cannot hold NaN must raise on a -1 (fill) indexer for
        every axis.
        """
        def _test_dtype(dtype, can_hold_na):
            data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)

            indexer = [2, 1, 0, 1]
            out0 = np.empty((4, 4, 3), dtype=dtype)
            out1 = np.empty((5, 4, 3), dtype=dtype)
            out2 = np.empty((5, 4, 4), dtype=dtype)
            com.take_nd(data, indexer, out=out0, axis=0)
            com.take_nd(data, indexer, out=out1, axis=1)
            com.take_nd(data, indexer, out=out2, axis=2)
            expected0 = data.take(indexer, axis=0)
            expected1 = data.take(indexer, axis=1)
            expected2 = data.take(indexer, axis=2)
            tm.assert_almost_equal(out0, expected0)
            tm.assert_almost_equal(out1, expected1)
            tm.assert_almost_equal(out2, expected2)

            # -1 requests the fill value (NaN)
            indexer = [2, 1, 0, -1]
            out0 = np.empty((4, 4, 3), dtype=dtype)
            out1 = np.empty((5, 4, 3), dtype=dtype)
            out2 = np.empty((5, 4, 4), dtype=dtype)
            if can_hold_na:
                com.take_nd(data, indexer, out=out0, axis=0)
                com.take_nd(data, indexer, out=out1, axis=1)
                com.take_nd(data, indexer, out=out2, axis=2)
                expected0 = data.take(indexer, axis=0)
                expected1 = data.take(indexer, axis=1)
                expected2 = data.take(indexer, axis=2)
                expected0[3, :, :] = np.nan
                expected1[:, 3, :] = np.nan
                expected2[:, :, 3] = np.nan
                tm.assert_almost_equal(out0, expected0)
                tm.assert_almost_equal(out1, expected1)
                tm.assert_almost_equal(out2, expected2)
            else:
                for i, out in enumerate([out0, out1, out2]):
                    with tm.assertRaisesRegexp(TypeError, self.fill_error):
                        com.take_nd(data, indexer, out=out, axis=i)
                    # no exception o/w
                    data.take(indexer, out=out, axis=i)

        _test_dtype(np.float64, True)
        _test_dtype(np.float32, True)
        _test_dtype(np.uint64, False)
        _test_dtype(np.uint32, False)
        _test_dtype(np.uint16, False)
        _test_dtype(np.uint8, False)
        _test_dtype(np.int64, False)
        _test_dtype(np.int32, False)
        _test_dtype(np.int16, False)
        _test_dtype(np.int8, False)
        _test_dtype(np.object_, True)
        _test_dtype(np.bool, False)
def test_3d_fill_nonna(self):
    # With a fill_value that does not fit the input dtype, take_nd must
    # upcast the result to ``out_dtype``; without any -1 indices the input
    # dtype must be preserved regardless of fill_value.
    def _test_dtype(dtype, fill_value, out_dtype):
        data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)

        # Missing entry present: position 3 gets fill_value, dtype upcasts.
        indexer = [2, 1, 0, -1]

        result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
        assert((result[3, :, :] == fill_value).all())
        assert(result.dtype == out_dtype)

        result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
        assert((result[:, 3, :] == fill_value).all())
        assert(result.dtype == out_dtype)

        result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
        assert((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
        assert((result[:, :, 3] == fill_value).all())
        assert(result.dtype == out_dtype)

        # No missing entries: fill_value is irrelevant, dtype unchanged.
        indexer = [2, 1, 0, 1]

        result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
        assert((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
        assert(result.dtype == dtype)

        result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
        assert((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
        assert(result.dtype == dtype)

        result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
        assert((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
        assert(result.dtype == dtype)

    _test_dtype(np.int8, np.int16(127), np.int8)
    _test_dtype(np.int8, np.int16(128), np.int16)
    _test_dtype(np.int32, 1, np.int32)
    _test_dtype(np.int32, 2.0, np.float64)
    _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
    _test_dtype(np.int32, True, np.object_)
    _test_dtype(np.int32, '', np.object_)
    _test_dtype(np.float64, 1, np.float64)
    _test_dtype(np.float64, 2.0, np.float64)
    _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
    _test_dtype(np.float64, True, np.object_)
    _test_dtype(np.float64, '', np.object_)
    _test_dtype(np.complex128, 1, np.complex128)
    _test_dtype(np.complex128, 2.0, np.complex128)
    _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
    _test_dtype(np.complex128, True, np.object_)
    _test_dtype(np.complex128, '', np.object_)
    _test_dtype(np.bool_, 1, np.object_)
    _test_dtype(np.bool_, 2.0, np.object_)
    _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
    _test_dtype(np.bool_, True, np.bool_)
    _test_dtype(np.bool_, '', np.object_)
def test_1d_other_dtypes(self):
    # float32 input: a -1 index must come back as NaN in the result.
    values = np.random.randn(10).astype(np.float32)
    idx = [1, 2, 3, -1]

    got = com.take_1d(values, idx)
    want = values.take(idx)
    want[-1] = np.nan
    tm.assert_almost_equal(got, want)
def test_2d_other_dtypes(self):
    # 2-D float32 input: -1 entries become NaN along the taken axis.
    values = np.random.randn(10, 5).astype(np.float32)
    idx = [1, 2, 3, -1]

    # rows (axis=0)
    got = com.take_nd(values, idx, axis=0)
    want = values.take(idx, axis=0)
    want[-1] = np.nan
    tm.assert_almost_equal(got, want)

    # columns (axis=1)
    got = com.take_nd(values, idx, axis=1)
    want = values.take(idx, axis=1)
    want[:, -1] = np.nan
    tm.assert_almost_equal(got, want)
def test_1d_bool(self):
    mask = np.array([0, 1, 0], dtype=bool)

    # A plain reindex keeps the bool dtype.
    got = com.take_1d(mask, [0, 2, 2, 1])
    self.assert_numpy_array_equal(got, mask.take([0, 2, 2, 1]))

    # A -1 (missing) index forces an upcast to object.
    got = com.take_1d(mask, [0, 2, -1])
    self.assertEqual(got.dtype, np.object_)
def test_2d_bool(self):
    mask = np.array([[0, 1, 0],
                     [1, 0, 1],
                     [0, 1, 1]], dtype=bool)

    # Row-take and column-take both keep the bool dtype.
    got = com.take_nd(mask, [0, 2, 2, 1])
    self.assert_numpy_array_equal(got, mask.take([0, 2, 2, 1], axis=0))

    got = com.take_nd(mask, [0, 2, 2, 1], axis=1)
    self.assert_numpy_array_equal(got, mask.take([0, 2, 2, 1], axis=1))

    # A -1 (missing) index forces an upcast to object.
    got = com.take_nd(mask, [0, 2, -1])
    self.assertEqual(got.dtype, np.object_)
def test_2d_float32(self):
    # float32 take with missing indices on both axes, with and without a
    # caller-supplied out buffer.
    arr = np.random.randn(4, 3).astype(np.float32)
    indexer = [0, 2, -1, 1, -1]

    # axis=0
    result = com.take_nd(arr, indexer, axis=0)
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, axis=0, out=result2)
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=0)
    expected[[2, 4], :] = np.nan  # the -1 positions
    tm.assert_almost_equal(result, expected)

    #### this now accepts a float32! # test with float64 out buffer
    out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
    com.take_nd(arr, indexer, out=out)  # it works!

    # axis=1
    result = com.take_nd(arr, indexer, axis=1)
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, axis=1, out=result2)
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=1)
    expected[:, [2, 4]] = np.nan  # the -1 positions
    tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
    # datetime64 take: missing positions become iNaT by default, or the
    # provided datetime fill_value when given.
    # 2005/01/01 - 2006/01/01
    arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000
    arr = arr.view(dtype='datetime64[ns]')
    indexer = [0, 2, -1, 1, -1]

    # axis=0
    result = com.take_nd(arr, indexer, axis=0)
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, axis=0, out=result2)
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=0)
    expected.view(np.int64)[[2, 4], :] = iNaT  # default NA sentinel
    tm.assert_almost_equal(result, expected)

    # explicit datetime fill_value instead of NaT
    result = com.take_nd(arr, indexer, axis=0,
                         fill_value=datetime(2007, 1, 1))
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, out=result2, axis=0,
                fill_value=datetime(2007, 1, 1))
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=0)
    expected[[2, 4], :] = datetime(2007, 1, 1)
    tm.assert_almost_equal(result, expected)

    # axis=1
    result = com.take_nd(arr, indexer, axis=1)
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, axis=1, out=result2)
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=1)
    expected.view(np.int64)[:, [2, 4]] = iNaT
    tm.assert_almost_equal(result, expected)

    result = com.take_nd(arr, indexer, axis=1,
                         fill_value=datetime(2007, 1, 1))
    result2 = np.empty_like(result)
    com.take_nd(arr, indexer, out=result2, axis=1,
                fill_value=datetime(2007, 1, 1))
    tm.assert_almost_equal(result, result2)

    expected = arr.take(indexer, axis=1)
    expected[:, [2, 4]] = datetime(2007, 1, 1)
    tm.assert_almost_equal(result, expected)
class TestMaybe(tm.TestCase):
    # Tests for com._maybe_convert_string_to_object.

    def test_maybe_convert_string_to_array(self):
        # A scalar string becomes a length-1 object ndarray.
        result = com._maybe_convert_string_to_object('x')
        tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
        self.assertTrue(result.dtype == object)

        # Non-string scalars are passed through untouched.
        result = com._maybe_convert_string_to_object(1)
        self.assertEqual(result, 1)

        # str-dtype arrays are converted to object dtype.
        arr = np.array(['x', 'y'], dtype=str)
        result = com._maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
        self.assertTrue(result.dtype == object)

        # unicode
        arr = np.array(['x', 'y']).astype('U')
        result = com._maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
        self.assertTrue(result.dtype == object)

        # object
        arr = np.array(['x', 2], dtype=object)
        result = com._maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
        self.assertTrue(result.dtype == object)
def test_dict_compat():
    # _dict_compat normalises datetime64 keys to Timestamps and leaves
    # every other key untouched.
    dt64_keyed = {np.datetime64('1990-03-15'): 1,
                  np.datetime64('2015-03-15'): 2}
    ts_keyed = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
    plain = {1: 2, 3: 4, 5: 6}

    assert(com._dict_compat(dt64_keyed) == ts_keyed)
    assert(com._dict_compat(ts_keyed) == ts_keyed)
    assert(com._dict_compat(plain) == plain)
if __name__ == '__main__':
    # Allow running this test module directly: verbose nose run that stops
    # on the first failure and drops into pdb on errors/failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
leesavide/pythonista-docs | Documentation/matplotlib/examples/user_interfaces/embedding_in_tk.py | 9 | 1419 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
# Build the Tk root window and embed a matplotlib figure in it.
root = Tk.Tk()
root.wm_title("Embedding in TK")

# A simple sine-wave plot to display.
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)

a.plot(t,s)

# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)


def on_key_event(event):
    # Forward key presses to matplotlib's default key bindings.
    print('you pressed %s'%event.key)
    key_press_handler(event, canvas, toolbar)

canvas.mpl_connect('key_press_event', on_key_event)


def _quit():
    root.quit()     # stops mainloop
    root.destroy()  # this is necessary on Windows to prevent
                    # Fatal Python Error: PyEval_RestoreThread: NULL tstate

button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)

Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
               convert_categoricals=True, encoding=None, index=None,
               convert_missing=False, preserve_dtypes=True, columns=None,
               order_categoricals=True, chunksize=None, iterator=False):
    # Full parameter documentation is attached via @Appender from
    # _read_stata_doc.
    reader = StataReader(filepath_or_buffer,
                         convert_dates=convert_dates,
                         convert_categoricals=convert_categoricals,
                         index=index, convert_missing=convert_missing,
                         preserve_dtypes=preserve_dtypes,
                         columns=columns,
                         order_categoricals=order_categoricals,
                         chunksize=chunksize, encoding=encoding)

    if iterator or chunksize:
        # Caller will iterate; it owns closing the reader.
        return reader

    try:
        return reader.read()
    finally:
        # BUG FIX: close the underlying handle even when read() raises;
        # the original only closed on success and leaked the file on error.
        reader.close()
# Stata SIF date format codes handled by the converters below.
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]

# Stata's epoch: day/millisecond zero for Stata Internal Format dates.
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
    """
    Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        The Stata Internal Format date to convert to datetime according to fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty

    Returns
    -------
    converted : Series
        The converted dates

    Examples
    --------
    >>> import pandas as pd
    >>> dates = pd.Series([52])
    >>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
    0   1961-01-01
    dtype: datetime64[ns]

    Notes
    -----
    datetime/c - tc
        milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
    datetime/C - tC - NOT IMPLEMENTED
        milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
    date - td
        days since 01jan1960 (01jan1960 = 0)
    weekly date - tw
        weeks since 1960w1
        This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
        The datetime value is the start of the week in terms of days in the
        year, not ISO calendar weeks.
    monthly date - tm
        months since 1960m1
    quarterly date - tq
        quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1
    yearly date - ty
        years since 0000

    If you don't have pandas with datetime support, then you can't do
    milliseconds accurately.
    """
    # Bounds of the pandas datetime64[ns] range; deltas outside them fall
    # back to slower datetime.datetime arithmetic in the helpers below.
    MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
    MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
    MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
    MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
    MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000

    def convert_year_month_safe(year, month):
        """
        Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using
        datetime.
        """
        if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
            return to_datetime(100 * year + month, format='%Y%m')
        else:
            index = getattr(year, 'index', None)
            return Series(
                [datetime.datetime(y, m, 1) for y, m in zip(year, month)],
                index=index)

    def convert_year_days_safe(year, days):
        """
        Converts year (e.g. 1999) and days since the start of the year to a
        datetime or datetime64 Series
        """
        if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
            return (to_datetime(year, format='%Y') +
                    to_timedelta(days, unit='d'))
        else:
            index = getattr(year, 'index', None)
            value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
                     for y, d in zip(year, days)]
            return Series(value, index=index)

    def convert_delta_safe(base, deltas, unit):
        """
        Convert base dates and deltas to datetimes, using pandas vectorized
        versions if the deltas satisfy restrictions required to be expressed
        as dates in pandas.
        """
        index = getattr(deltas, 'index', None)
        if unit == 'd':
            if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
                values = [base + relativedelta(days=int(d)) for d in deltas]
                return Series(values, index=index)
        elif unit == 'ms':
            if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
                values = [base + relativedelta(microseconds=(int(d) * 1000))
                          for d in deltas]
                return Series(values, index=index)
        else:
            raise ValueError('format not understood')
        base = to_datetime(base)
        deltas = to_timedelta(deltas, unit=unit)
        return base + deltas

    # TODO: If/when pandas supports more than datetime64[ns], this should be
    # improved to use correct range, e.g. datetime[Y] for yearly
    bad_locs = np.isnan(dates)
    has_bad_values = False
    if bad_locs.any():
        # Remember NaN positions, substitute a harmless placeholder so the
        # int cast below succeeds, and restore NaT at the end.
        has_bad_values = True
        data_col = Series(dates)
        data_col[bad_locs] = 1.0  # Replace with NaT
    dates = dates.astype(np.int64)

    if fmt in ["%tc", "tc"]:  # Delta ms relative to base
        base = stata_epoch
        ms = dates
        conv_dates = convert_delta_safe(base, ms, 'ms')
    elif fmt in ["%tC", "tC"]:
        from warnings import warn

        warn("Encountered %tC format. Leaving in Stata Internal Format.")
        conv_dates = Series(dates, dtype=np.object)
        if has_bad_values:
            conv_dates[bad_locs] = pd.NaT
        return conv_dates
    elif fmt in ["%td", "td", "%d", "d"]:  # Delta days relative to base
        base = stata_epoch
        days = dates
        conv_dates = convert_delta_safe(base, days, 'd')
    elif fmt in ["%tw", "tw"]:  # does not count leap days - 7 days is a week
        year = stata_epoch.year + dates // 52
        days = (dates % 52) * 7
        conv_dates = convert_year_days_safe(year, days)
    elif fmt in ["%tm", "tm"]:  # Delta months relative to base
        year = stata_epoch.year + dates // 12
        month = (dates % 12) + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%tq", "tq"]:  # Delta quarters relative to base
        year = stata_epoch.year + dates // 4
        month = (dates % 4) * 3 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%th", "th"]:  # Delta half-years relative to base
        year = stata_epoch.year + dates // 2
        month = (dates % 2) * 6 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%ty", "ty"]:  # Years -- not delta
        year = dates
        month = np.ones_like(dates)
        conv_dates = convert_year_month_safe(year, month)
    else:
        raise ValueError("Date fmt %s not understood" % fmt)

    if has_bad_values:  # Restore NaT for bad values
        conv_dates[bad_locs] = NaT

    return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
    """
    Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        Series or array containing datetime.datetime or datetime64[ns] to
        convert to the Stata Internal Format given by fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    """
    index = dates.index
    NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
    US_PER_DAY = NS_PER_DAY / 1000

    def parse_dates_safe(dates, delta=False, year=False, days=False):
        # Extract the requested components (elapsed delta since the Stata
        # epoch, calendar year/month, day-of-year) as columns of a DataFrame,
        # using vectorised paths for datetime64 and a slow object path for
        # datetime.datetime values.
        d = {}
        if is_datetime64_dtype(dates.values):
            if delta:
                delta = dates - stata_epoch
                d['delta'] = delta.values.astype(
                    np.int64) // 1000  # microseconds
            if days or year:
                dates = DatetimeIndex(dates)
                d['year'], d['month'] = dates.year, dates.month
            if days:
                days = (dates.astype(np.int64) -
                        to_datetime(d['year'], format='%Y').astype(np.int64))
                d['days'] = days // NS_PER_DAY

        elif infer_dtype(dates) == 'datetime':
            if delta:
                delta = dates.values - stata_epoch
                f = lambda x: \
                    US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
                v = np.vectorize(f)
                d['delta'] = v(delta)
            if year:
                year_month = dates.apply(lambda x: 100 * x.year + x.month)
                d['year'] = year_month.values // 100
                d['month'] = (year_month.values - d['year'] * 100)
            if days:
                f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
                v = np.vectorize(f)
                d['days'] = v(dates)
        else:
            raise ValueError('Columns containing dates must contain either '
                             'datetime64, datetime.datetime or null values.')

        return DataFrame(d, index=index)

    bad_loc = isnull(dates)
    index = dates.index
    if bad_loc.any():
        # Temporarily replace missing dates with the epoch so the vectorised
        # conversions succeed; written back as Stata's missing code below.
        dates = Series(dates)
        if is_datetime64_dtype(dates):
            dates[bad_loc] = to_datetime(stata_epoch)
        else:
            dates[bad_loc] = stata_epoch

    if fmt in ["%tc", "tc"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta / 1000
    elif fmt in ["%tC", "tC"]:
        from warnings import warn
        warn("Stata Internal Format tC not supported.")
        conv_dates = dates
    elif fmt in ["%td", "td"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta // US_PER_DAY
    elif fmt in ["%tw", "tw"]:
        d = parse_dates_safe(dates, year=True, days=True)
        conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
    elif fmt in ["%tm", "tm"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
    elif fmt in ["%tq", "tq"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
    elif fmt in ["%th", "th"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = 2 * (d.year - stata_epoch.year) + \
                     (d.month > 6).astype(np.int)
    elif fmt in ["%ty", "ty"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = d.year
    else:
        raise ValueError("Format %s is not a known Stata date format" % fmt)

    conv_dates = Series(conv_dates, dtype=np.float64)
    # Stata's generic missing double ('.'); used for missing input dates.
    missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
    conv_dates[bad_loc] = missing_value

    return Series(conv_dates, index=index)
# Message templates for the warnings/errors raised by the writer below.
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""


class PossiblePrecisionLoss(Warning):
    # Raised (as a warning) when a numeric conversion may lose precision.
    pass


precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""


class ValueLabelTypeMismatch(Warning):
    # Raised (as a warning) when categorical labels are not strings.
    pass


value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""


class InvalidColumnName(Warning):
    # Raised (as a warning) when column names must be renamed for Stata.
    pass


invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:

    {0}

If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
the value are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint colums are converted to int of the
same size if there is no loss in precision, other wise are upcast to a
larger type. uint64 is currently not supported since it is concerted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
class StataValueLabel(object):
    """
    Parse a categorical column and prepare formatted output

    Parameters
    ----------
    catarray : Series
        Categorical Series whose categories are encoded as a Stata value
        label (NOTE(review): the original docstring here was copied from
        StataMissingValue and wrongly described a missing-value code).

    Methods
    -------
    generate_value_label
    """

    def __init__(self, catarray):
        self.labname = catarray.name

        # Pair each category with its integer code, ordered by code.
        categories = catarray.cat.categories
        self.value_labels = list(zip(np.arange(len(categories)), categories))
        self.value_labels.sort(key=lambda x: x[0])
        self.text_len = np.int32(0)
        self.off = []
        self.val = []
        self.txt = []
        self.n = 0

        # Compute lengths and setup lists of offsets and labels
        for vl in self.value_labels:
            category = vl[1]
            if not isinstance(category, string_types):
                # Stata labels must be strings; coerce and warn.
                category = str(category)
                import warnings
                warnings.warn(value_label_mismatch_doc.format(catarray.name),
                              ValueLabelTypeMismatch)

            self.off.append(self.text_len)
            self.text_len += len(category) + 1  # +1 for the padding
            self.val.append(vl[0])
            self.txt.append(category)
            self.n += 1

        if self.text_len > 32000:
            raise ValueError('Stata value labels for a single variable must '
                             'have a combined length less than 32,000 '
                             'characters.')

        # Ensure int32
        self.off = np.array(self.off, dtype=np.int32)
        self.val = np.array(self.val, dtype=np.int32)

        # Total length
        self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len

    def _encode(self, s):
        """
        Python 3 compatability shim
        """
        if compat.PY3:
            return s.encode(self._encoding)
        else:
            return s

    def generate_value_label(self, byteorder, encoding):
        """
        Parameters
        ----------
        byteorder : str
            Byte order of the output
        encoding : str
            File encoding

        Returns
        -------
        value_label : bytes
            Bytes containing the formatted value label
        """
        self._encoding = encoding
        bio = BytesIO()
        null_string = '\x00'
        null_byte = b'\x00'

        # len
        bio.write(struct.pack(byteorder + 'i', self.len))

        # labname
        labname = self._encode(_pad_bytes(self.labname[:32], 33))
        bio.write(labname)

        # padding - 3 bytes
        for i in range(3):
            bio.write(struct.pack('c', null_byte))

        # value_label_table
        # n - int32
        bio.write(struct.pack(byteorder + 'i', self.n))

        # textlen - int32
        bio.write(struct.pack(byteorder + 'i', self.text_len))

        # off - int32 array (n elements)
        for offset in self.off:
            bio.write(struct.pack(byteorder + 'i', offset))

        # val - int32 array (n elements)
        for value in self.val:
            bio.write(struct.pack(byteorder + 'i', value))

        # txt - Text labels, null terminated
        for text in self.txt:
            bio.write(self._encode(text + null_string))

        bio.seek(0)
        return bio.read()
class StataMissingValue(StringMixin):
    """
    An observation's missing value.

    Parameters
    -----------
    value : int8, int16, int32, float32 or float64
        The Stata missing value code

    Attributes
    ----------
    string : string
        String representation of the Stata missing value
    value : int8, int16, int32, float32 or float64
        The original encoded missing value

    Notes
    -----
    More information: <http://www.stata.com/help.cgi?missing>

    Integer missing values make the code '.', '.a', ..., '.z' to the ranges
    101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
    2147483647 (for int32). Missing values for floating point data types are
    more complex but the pattern is simple to discern from the following
    table.

    np.float32 missing values (float in Stata)
    0000007f    .
    0008007f    .a
    0010007f    .b
    ...
    00c0007f    .x
    00c8007f    .y
    00d0007f    .z

    np.float64 missing values (double in Stata)
    000000000000e07f    .
    000000000001e07f    .a
    000000000002e07f    .b
    ...
    000000000018e07f    .x
    000000000019e07f    .y
    00000000001ae07f    .z
    """

    # Construct a dictionary of missing values (code -> display string),
    # built once at class-definition time.
    MISSING_VALUES = {}
    bases = (101, 32741, 2147483621)
    for b in bases:
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        MISSING_VALUES[compat.long(b)] = '.'
        for i in range(1, 27):
            MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)

    # Generate the 27 float32 sentinels ('.', '.a' ... '.z') by stepping
    # the raw IEEE bit pattern.
    float32_base = b'\x00\x00\x00\x7f'
    increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<f', float32_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('<i', struct.pack('<f', value))[
            0] + increment
        float32_base = struct.pack('<i', int_value)

    # Same construction for the 27 float64 sentinels.
    float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
    increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<d', float64_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
        float64_base = struct.pack('q', int_value)

    # Generic '.' missing code for each supported dtype.
    BASE_MISSING_VALUES = {'int8': 101,
                           'int16': 32741,
                           'int32': 2147483621,
                           'float32': struct.unpack('<f', float32_base)[0],
                           'float64': struct.unpack('<d', float64_base)[0]}

    def __init__(self, value):
        self._value = value
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        value = compat.long(value) if value < 2147483648 else float(value)
        self._str = self.MISSING_VALUES[value]

    string = property(lambda self: self._str,
                      doc="The Stata representation of the missing value: "
                          "'.', '.a'..'.z'")
    value = property(lambda self: self._value,
                     doc='The binary representation of the missing value.')

    def __unicode__(self):
        return self.string

    def __repr__(self):
        # not perfect :-/
        return "%s(%s)" % (self.__class__, self)

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances unhashable
        # on Python 3 — confirm whether that is intended.
        return (isinstance(other, self.__class__) and
                self.string == other.string and self.value == other.value)

    @classmethod
    def get_base_missing_value(cls, dtype):
        # Map a numpy dtype to Stata's generic '.' missing-value code.
        if dtype == np.int8:
            value = cls.BASE_MISSING_VALUES['int8']
        elif dtype == np.int16:
            value = cls.BASE_MISSING_VALUES['int16']
        elif dtype == np.int32:
            value = cls.BASE_MISSING_VALUES['int32']
        elif dtype == np.float32:
            value = cls.BASE_MISSING_VALUES['float32']
        elif dtype == np.float64:
            value = cls.BASE_MISSING_VALUES['float64']
        else:
            raise ValueError('Unsupported dtype')
        return value
class StataParser(object):
    """Shared constants and lookup tables for Stata dta readers/writers."""

    # Fallback text encoding used when the caller supplies none.
    _default_encoding = 'iso-8859-1'

    def __init__(self, encoding):
        self._encoding = encoding

        # type          code.
        # --------------------
        # str1        1 = 0x01
        # str2        2 = 0x02
        # ...
        # str244    244 = 0xf4
        # byte      251 = 0xfb  (sic)
        # int       252 = 0xfc
        # long      253 = 0xfd
        # float     254 = 0xfe
        # double    255 = 0xff
        # --------------------
        # NOTE: the byte type seems to be reserved for categorical variables
        # with a label, but the underlying variable is -127 to 100
        # we're going to drop the label and cast to int
        self.DTYPE_MAP = \
            dict(
                lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
                [
                    (251, np.int8),
                    (252, np.int16),
                    (253, np.int32),
                    (254, np.float32),
                    (255, np.float64)
                ]
            )
        # Same mapping for the XML-based formats (117/118), which use
        # two-byte type codes.
        self.DTYPE_MAP_XML = \
            dict(
                [
                    (32768, np.uint8),  # Keys to GSO
                    (65526, np.float64),
                    (65527, np.float32),
                    (65528, np.int32),
                    (65529, np.int16),
                    (65530, np.int8)
                ]
            )
        # Positional map: indices 0-250 are string widths, then the five
        # struct codes for the numeric types.
        self.TYPE_MAP = lrange(251) + list('bhlfd')
        self.TYPE_MAP_XML = \
            dict(
                [
                    # Not really a Q, unclear how to handle byteswap
                    (32768, 'Q'),
                    (65526, 'd'),
                    (65527, 'f'),
                    (65528, 'l'),
                    (65529, 'h'),
                    (65530, 'b')
                ]
            )
        # NOTE: technically, some of these are wrong. there are more numbers
        # that can be represented. it's the 27 ABOVE and BELOW the max listed
        # numeric data type in [U] 12.2.2 of the 11.2 manual
        float32_min = b'\xff\xff\xff\xfe'
        float32_max = b'\xff\xff\xff\x7e'
        float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
        float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
        # Inclusive (min, max) of representable (non-missing) values per
        # Stata type code.
        self.VALID_RANGE = {
            'b': (-127, 100),
            'h': (-32767, 32740),
            'l': (-2147483647, 2147483620),
            'f': (np.float32(struct.unpack('<f', float32_min)[0]),
                  np.float32(struct.unpack('<f', float32_max)[0])),
            'd': (np.float64(struct.unpack('<d', float64_min)[0]),
                  np.float64(struct.unpack('<d', float64_max)[0]))
        }

        # Old (pre-111) single-character type codes -> modern codes.
        self.OLD_TYPE_MAPPING = {
            98: 251,   # byte
            105: 252,  # int
            108: 253,  # long
            102: 254   # float
            # don't know old code for double
        }

        # These missing values are the generic '.' in Stata, and are used
        # to replace nans
        self.MISSING_VALUES = {
            'b': 101,
            'h': 32741,
            'l': 2147483621,
            'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
            'd': np.float64(
                struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
        }
        # Stata type code -> numpy dtype string (without byte order).
        self.NUMPY_TYPE_MAP = {
            'b': 'i1',
            'h': 'i2',
            'l': 'i4',
            'f': 'f4',
            'd': 'f8',
            'Q': 'u8'
        }

        # Reserved words cannot be used as variable names
        self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
                               'byte', 'case', 'catch', 'class', 'colvector',
                               'complex', 'const', 'continue', 'default',
                               'delegate', 'delete', 'do', 'double', 'else',
                               'eltypedef', 'end', 'enum', 'explicit',
                               'export', 'external', 'float', 'for', 'friend',
                               'function', 'global', 'goto', 'if', 'inline',
                               'int', 'local', 'long', 'NULL', 'pragma',
                               'protected', 'quad', 'rowvector', 'short',
                               'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
    __doc__ = _stata_reader_doc

    def __init__(self, path_or_buf, convert_dates=True,
                 convert_categoricals=True, index=None,
                 convert_missing=False, preserve_dtypes=True,
                 columns=None, order_categoricals=True,
                 encoding='iso-8859-1', chunksize=None):
        super(StataReader, self).__init__(encoding)
        self.col_sizes = ()

        # Arguments to the reader (can be temporarily overridden in
        # calls to read).
        self._convert_dates = convert_dates
        self._convert_categoricals = convert_categoricals
        self._index = index
        self._convert_missing = convert_missing
        self._preserve_dtypes = preserve_dtypes
        self._columns = columns
        self._order_categoricals = order_categoricals
        self._encoding = encoding
        self._chunksize = chunksize

        # State variables for the file
        self._has_string_data = False
        self._missing_values = False
        self._can_read_value_labels = False
        self._column_selector_set = False
        self._value_labels_read = False
        self._data_read = False
        self._dtype = None
        self._lines_read = 0

        self._native_byteorder = _set_endianness(sys.byteorder)
        # Resolve string paths (possibly URLs) to a local path or buffer.
        if isinstance(path_or_buf, str):
            path_or_buf, encoding, _ = get_filepath_or_buffer(
                path_or_buf, encoding=self._default_encoding
            )

        if isinstance(path_or_buf, (str, compat.text_type, bytes)):
            self.path_or_buf = open(path_or_buf, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = path_or_buf.read()
            # NOTE(review): bare except here silently keeps *contents*
            # unchanged when it is already bytes (encode unavailable/fails).
            try:
                contents = contents.encode(self._default_encoding)
            except:
                pass
            self.path_or_buf = BytesIO(contents)

        self._read_header()
def __enter__(self):
    """Enter the runtime context; no extra setup is required."""
    return self

def __exit__(self, exc_type, exc_value, traceback):
    """Leave the runtime context, closing the underlying handle."""
    self.close()

def close(self):
    """Close the underlying file handle, swallowing I/O errors."""
    handle = self.path_or_buf
    try:
        handle.close()
    except IOError:
        pass
def _read_header(self):
    """Parse the dta header, dispatching on the first byte: '<' marks
    the XML-like 117/118 layout, anything else the legacy layout."""
    first_char = self.path_or_buf.read(1)
    if struct.unpack('c', first_char)[0] == b'<':
        self._read_new_header(first_char)
    else:
        self._read_old_header(first_char)

    # Any int entry in typlist is a fixed-width string column.
    self.has_string_data = len([x for x in self.typlist
                                if type(x) is int]) > 0

    # calculate size of a data record
    self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)

    # remove format details from %td
    self.fmtlist = ["%td" if x.startswith("%td") else x
                    for x in self.fmtlist]
def _read_new_header(self, first_char):
    """Parse a format 117 (Stata 13) / 118 (Stata 14) header.

    Reads the fixed header fields, the <map> of section offsets, and
    then the variable type/name/sort/format/label-name/label sections.
    Sets ``format_version``, ``byteorder``, ``nvar``, ``nobs``,
    ``data_label``, ``time_stamp``, the various ``_seek_*`` offsets and
    the per-variable metadata lists.
    """
    # The first part of the header is common to 117 and 118.
    self.path_or_buf.read(27)  # stata_dta><header><release>
    self.format_version = int(self.path_or_buf.read(3))
    if self.format_version not in [117, 118]:
        raise ValueError(_version_error)
    self.path_or_buf.read(21)  # </release><byteorder>
    # BUG FIX: the stream yields *bytes*; comparing them to the str
    # "MSF" is always False on Python 3, which silently forced
    # little-endian ('<') for big-endian files.  Compare to b"MSF"
    # (identical behavior on Python 2, correct on Python 3).
    self.byteorder = self.path_or_buf.read(3) == b"MSF" and '>' or '<'
    self.path_or_buf.read(15)  # </byteorder><K>
    self.nvar = struct.unpack(self.byteorder + 'H',
                              self.path_or_buf.read(2))[0]
    self.path_or_buf.read(7)  # </K><N>
    self.nobs = self._get_nobs()
    self.path_or_buf.read(11)  # </N><label>
    self.data_label = self._get_data_label()
    self.path_or_buf.read(19)  # </label><timestamp>
    self.time_stamp = self._get_time_stamp()
    self.path_or_buf.read(26)  # </timestamp></header><map>
    self.path_or_buf.read(8)  # 0x0000000000000000
    self.path_or_buf.read(8)  # position of <map>

    # Section offsets from the <map>; the small additions skip each
    # section's opening tag.
    self._seek_vartypes = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
    self._seek_varnames = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
    self._seek_sortlist = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
    self._seek_formats = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
    self._seek_value_label_names = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19

    # Requires version-specific treatment
    self._seek_variable_labels = self._get_seek_variable_labels()

    self.path_or_buf.read(8)  # <characteristics>
    self.data_location = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
    self.seek_strls = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
    self.seek_value_labels = struct.unpack(
        self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14

    self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)

    self.path_or_buf.seek(self._seek_varnames)
    self.varlist = self._get_varlist()

    self.path_or_buf.seek(self._seek_sortlist)
    self.srtlist = struct.unpack(
        self.byteorder + ('h' * (self.nvar + 1)),
        self.path_or_buf.read(2 * (self.nvar + 1))
    )[:-1]

    self.path_or_buf.seek(self._seek_formats)
    self.fmtlist = self._get_fmtlist()

    self.path_or_buf.seek(self._seek_value_label_names)
    self.lbllist = self._get_lbllist()

    self.path_or_buf.seek(self._seek_variable_labels)
    self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
    """Read the raw two-byte type codes and return the pair
    (typlist, dtyplist) of Stata type codes and numpy dtypes."""
    self.path_or_buf.seek(seek_vartypes)
    raw_typlist = [struct.unpack(self.byteorder + 'H',
                                 self.path_or_buf.read(2))[0]
                   for i in range(self.nvar)]

    def to_typ(code):
        # Codes <= 2045 are literal string widths; larger codes map
        # through TYPE_MAP_XML.
        if code <= 2045:
            return code
        try:
            return self.TYPE_MAP_XML[code]
        except KeyError:
            raise ValueError("cannot convert stata types [{0}]".
                             format(code))

    def to_dtyp(code):
        # String widths become their str() form; numeric codes map
        # through DTYPE_MAP_XML.
        if code <= 2045:
            return str(code)
        try:
            return self.DTYPE_MAP_XML[code]
        except KeyError:
            raise ValueError("cannot convert stata dtype [{0}]"
                             .format(code))

    typlist = [to_typ(code) for code in raw_typlist]
    dtyplist = [to_dtyp(code) for code in raw_typlist]
    return typlist, dtyplist
def _get_varlist(self):
    """Read the variable names (33 bytes each for 117, 129 for 118)."""
    if self.format_version == 117:
        width = 33
    elif self.format_version == 118:
        width = 129
    return [self._null_terminate(self.path_or_buf.read(width))
            for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
    """Read per-variable display formats; field width depends on the
    dta format version."""
    if self.format_version == 118:
        width = 57
    elif self.format_version > 113:
        width = 49
    elif self.format_version > 104:
        width = 12
    else:
        width = 7
    return [self._null_terminate(self.path_or_buf.read(width))
            for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
    """Read the per-variable value-label names; width varies by
    dta format version."""
    if self.format_version >= 118:
        width = 129
    elif self.format_version > 108:
        width = 33
    else:
        width = 9
    return [self._null_terminate(self.path_or_buf.read(width))
            for i in range(self.nvar)]
def _get_variable_labels(self):
    """Read one label string per variable; 118 files are UTF-8 decoded,
    older files are null-terminated in the reader encoding."""
    if self.format_version == 118:
        reader, width = self._decode, 321
    elif self.format_version > 105:
        reader, width = self._null_terminate, 81
    else:
        reader, width = self._null_terminate, 32
    return [reader(self.path_or_buf.read(width))
            for i in range(self.nvar)]
def _get_nobs(self):
    """Read the observation count: uint64 for format 118, else uint32."""
    if self.format_version == 118:
        code, nbytes = 'Q', 8
    else:
        code, nbytes = 'I', 4
    return struct.unpack(self.byteorder + code,
                         self.path_or_buf.read(nbytes))[0]
def _get_data_label(self):
    """Read the dataset label; the layout varies with the dta version."""
    version = self.format_version
    if version == 118:
        # Two-byte length prefix, then UTF-8 text.
        strlen = struct.unpack(self.byteorder + 'H',
                               self.path_or_buf.read(2))[0]
        return self._decode(self.path_or_buf.read(strlen))
    if version == 117:
        # One-byte length prefix, then null-terminated text.
        strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
        return self._null_terminate(self.path_or_buf.read(strlen))
    if version > 105:
        return self._null_terminate(self.path_or_buf.read(81))
    return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
    """Read the file's creation time stamp string."""
    version = self.format_version
    if version == 118:
        strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
        return self.path_or_buf.read(strlen).decode("utf-8")
    if version == 117:
        strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
        return self._null_terminate(self.path_or_buf.read(strlen))
    if version > 104:
        return self._null_terminate(self.path_or_buf.read(18))
    raise ValueError()
def _get_seek_variable_labels(self):
    """Return the file offset of the variable-labels section."""
    if self.format_version == 117:
        self.path_or_buf.read(8)  # <variable_lables>, throw away
        # Stata 117 data files do not follow the described format.  This is
        # a work around that uses the previous label, 33 bytes for each
        # variable, 20 for the closing tag and 17 for the opening tag
        return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
    elif self.format_version == 118:
        return struct.unpack(self.byteorder + 'q',
                             self.path_or_buf.read(8))[0] + 17
    else:
        # Only the new-header path (117/118) should reach this method.
        raise ValueError()
def _read_old_header(self, first_char):
    """Parse a legacy (pre-XML, formats 104-115) dta header.

    Sets ``format_version``, ``byteorder``, ``nvar``, ``nobs``, the
    per-variable metadata lists and ``data_location`` (file offset of
    the data section).
    """
    self.format_version = struct.unpack('b', first_char)[0]
    if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
        raise ValueError(_version_error)
    # Byte 1 is the byteorder flag: 0x1 means big-endian.
    self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
        0] == 0x1 and '>' or '<'
    self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
    self.path_or_buf.read(1)  # unused

    self.nvar = struct.unpack(self.byteorder + 'H',
                              self.path_or_buf.read(2))[0]
    self.nobs = self._get_nobs()
    self.data_label = self._get_data_label()
    self.time_stamp = self._get_time_stamp()

    # descriptors
    if self.format_version > 108:
        typlist = [ord(self.path_or_buf.read(1))
                   for i in range(self.nvar)]
    else:
        buf = self.path_or_buf.read(self.nvar)
        typlistb = np.frombuffer(buf, dtype=np.uint8)
        typlist = []
        for tp in typlistb:
            if tp in self.OLD_TYPE_MAPPING:
                typlist.append(self.OLD_TYPE_MAPPING[tp])
            else:
                typlist.append(tp - 127)  # py2 string, py3 bytes

    # BUG FIX: the two handlers below were bare ``except:`` clauses,
    # which also swallowed SystemExit/KeyboardInterrupt.  Narrowed to
    # the exceptions the lookups can actually raise.
    try:
        self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
    except IndexError:
        raise ValueError("cannot convert stata types [{0}]"
                         .format(','.join(str(x) for x in typlist)))
    try:
        self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
    except KeyError:
        raise ValueError("cannot convert stata dtypes [{0}]"
                         .format(','.join(str(x) for x in typlist)))

    if self.format_version > 108:
        self.varlist = [self._null_terminate(self.path_or_buf.read(33))
                        for i in range(self.nvar)]
    else:
        self.varlist = [self._null_terminate(self.path_or_buf.read(9))
                        for i in range(self.nvar)]
    self.srtlist = struct.unpack(
        self.byteorder + ('h' * (self.nvar + 1)),
        self.path_or_buf.read(2 * (self.nvar + 1))
    )[:-1]

    self.fmtlist = self._get_fmtlist()
    self.lbllist = self._get_lbllist()
    self._variable_labels = self._get_variable_labels()

    # ignore expansion fields (Format 105 and later)
    # When reading, read five bytes; the last four bytes now tell you
    # the size of the next read, which you discard.  You then continue
    # like this until you read 5 bytes of zeros.
    if self.format_version > 104:
        while True:
            data_type = struct.unpack(self.byteorder + 'b',
                                      self.path_or_buf.read(1))[0]
            if self.format_version > 108:
                data_len = struct.unpack(self.byteorder + 'i',
                                         self.path_or_buf.read(4))[0]
            else:
                data_len = struct.unpack(self.byteorder + 'h',
                                         self.path_or_buf.read(2))[0]
            if data_type == 0:
                break
            self.path_or_buf.read(data_len)

    # necessary data to continue parsing
    self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
    """Byte width of one field: int entries are literal string widths,
    single-char codes go through struct with the file's byte order."""
    # Mirrors the original ``and/or`` chain, including its truthiness
    # test on fmt.
    if type(fmt) is int and fmt:
        return fmt
    return struct.calcsize(self.byteorder + fmt)
def _decode(self, s):
    """Decode a (possibly null-terminated) UTF-8 byte string."""
    head, _, _ = s.partition(b"\0")
    return head.decode('utf-8')
def _null_terminate(self, s):
    """Strip *s* at its first null byte and decode it.

    On Python 3 (or when an encoding is set) the input is bytes and is
    decoded with the reader's encoding; on Python 2 without an encoding
    the raw string is truncated at the null byte, or returned unchanged
    when no null byte is present.
    """
    if compat.PY3 or self._encoding is not None:
        # have bytes not strings, so must decode
        s = s.partition(b"\0")[0]
        return s.decode(self._encoding or self._default_encoding)
    else:
        null_byte = "\0"
        try:
            return s.lstrip(null_byte)[:s.index(null_byte)]
        except ValueError:
            # BUG FIX: was a bare ``except:``; str.index raises
            # ValueError when no null byte exists, and only that case
            # should fall through to returning s unchanged.
            return s
def _read_value_labels(self):
    """Populate ``self.value_label_dict`` mapping each label name to a
    {value: label-text} dict, reading the table at most once."""
    if self.format_version <= 108:
        # Value labels are not supported in version 108 and earlier.
        return

    if self._value_labels_read:
        # Don't read twice
        return

    if self.format_version >= 117:
        self.path_or_buf.seek(self.seek_value_labels)
    else:
        # Legacy layout: the label table follows the fixed-width data.
        offset = self.nobs * self._dtype.itemsize
        self.path_or_buf.seek(self.data_location + offset)

    self._value_labels_read = True
    self.value_label_dict = dict()

    while True:
        if self.format_version >= 117:
            if self.path_or_buf.read(5) == b'</val':  # <lbl>
                break  # end of value label table

        slength = self.path_or_buf.read(4)
        if not slength:
            break  # end of value label table (format < 117)
        if self.format_version <= 117:
            labname = self._null_terminate(self.path_or_buf.read(33))
        else:
            labname = self._decode(self.path_or_buf.read(129))
        self.path_or_buf.read(3)  # padding

        # n entries, followed by txtlen bytes of concatenated label text.
        n = struct.unpack(self.byteorder + 'I',
                          self.path_or_buf.read(4))[0]
        txtlen = struct.unpack(self.byteorder + 'I',
                               self.path_or_buf.read(4))[0]
        # Per-entry text offsets into the text blob, then the values.
        off = np.frombuffer(self.path_or_buf.read(4 * n),
                            dtype=self.byteorder + "i4",
                            count=n)
        val = np.frombuffer(self.path_or_buf.read(4 * n),
                            dtype=self.byteorder + "i4",
                            count=n)
        # Sort by offset so each entry's text ends where the next begins.
        ii = np.argsort(off)
        off = off[ii]
        val = val[ii]
        txt = self.path_or_buf.read(txtlen)
        self.value_label_dict[labname] = dict()
        for i in range(n):
            end = off[i + 1] if i < n - 1 else txtlen
            if self.format_version <= 117:
                self.value_label_dict[labname][val[i]] = (
                    self._null_terminate(txt[off[i]:end]))
            else:
                self.value_label_dict[labname][val[i]] = (
                    self._decode(txt[off[i]:end]))
        if self.format_version >= 117:
            self.path_or_buf.read(6)  # </lbl>
    self._value_labels_read = True
def _read_strls(self):
    """Read the GSO (long string, strL) table into ``self.GSO``,
    keyed by the packed v,o identifier."""
    self.path_or_buf.seek(self.seek_strls)
    self.GSO = {0: ''}
    while True:
        if self.path_or_buf.read(3) != b'GSO':
            break

        if self.format_version == 117:
            v_o = struct.unpack(self.byteorder + 'Q',
                                self.path_or_buf.read(8))[0]
        else:
            # 118 stores v (4 bytes) and o (8 bytes) separately; pack
            # them into a single 8-byte key to match the data section.
            buf = self.path_or_buf.read(12)
            # Only tested on little endian file on little endian machine.
            if self.byteorder == '<':
                buf = buf[0:2] + buf[4:10]
            else:
                buf = buf[0:2] + buf[6:]
            v_o = struct.unpack('Q', buf)[0]
        typ = struct.unpack('B', self.path_or_buf.read(1))[0]
        length = struct.unpack(self.byteorder + 'I',
                               self.path_or_buf.read(4))[0]
        va = self.path_or_buf.read(length)
        # Type 130 is null-terminated text; type 129 (binary) is kept raw.
        if typ == 130:
            encoding = 'utf-8'
            if self.format_version == 117:
                encoding = self._encoding or self._default_encoding
            va = va[0:-1].decode(encoding)
        self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
    # Deprecated alias for read(): warns, refuses a second read, and
    # delegates to read() for the whole file.
    import warnings
    warnings.warn("'data' is deprecated, use 'read' instead")
    if self._data_read:
        raise Exception("Data has already been read.")
    self._data_read = True
    return self.read(None, **kwargs)
def __next__(self):
    """Iterator protocol: read the next chunk (one row when no
    chunksize was given)."""
    step = self._chunksize or 1
    return self.read(nrows=step)

def get_chunk(self, size=None):
    """
    Reads lines from Stata file and returns as dataframe

    Parameters
    ----------
    size : int, defaults to None
        Number of lines to read.  If None, reads whole file.

    Returns
    -------
    DataFrame
    """
    nrows = self._chunksize if size is None else size
    return self.read(nrows=nrows)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
         convert_categoricals=None, index=None,
         convert_missing=None, preserve_dtypes=None,
         columns=None, order_categoricals=None):
    # Handle empty file or chunk.  If reading incrementally raise
    # StopIteration.  If reading the whole thing return an empty
    # data frame.
    if (self.nobs == 0) and (nrows is None):
        self._can_read_value_labels = True
        self._data_read = True
        self.close()
        return DataFrame(columns=self.varlist)

    # Handle options: any argument left as None falls back to the
    # value given when the reader was constructed.
    if convert_dates is None:
        convert_dates = self._convert_dates
    if convert_categoricals is None:
        convert_categoricals = self._convert_categoricals
    if convert_missing is None:
        convert_missing = self._convert_missing
    if preserve_dtypes is None:
        preserve_dtypes = self._preserve_dtypes
    if columns is None:
        columns = self._columns
    if order_categoricals is None:
        order_categoricals = self._order_categoricals
    if nrows is None:
        nrows = self.nobs

    # New-format files carry a strL (GSO) table that must be loaded
    # before any data rows are decoded.
    if (self.format_version >= 117) and (self._dtype is None):
        self._can_read_value_labels = True
        self._read_strls()

    # Setup the dtype.
    if self._dtype is None:
        dtype = []  # Convert struct data types to numpy data type
        for i, typ in enumerate(self.typlist):
            if typ in self.NUMPY_TYPE_MAP:
                dtype.append(('s' + str(i), self.byteorder +
                              self.NUMPY_TYPE_MAP[typ]))
            else:
                # int entries in typlist are fixed-width string columns.
                dtype.append(('s' + str(i), 'S' + str(typ)))
        dtype = np.dtype(dtype)
        self._dtype = dtype

    # Read data
    dtype = self._dtype
    max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
    read_len = nrows * dtype.itemsize
    read_len = min(read_len, max_read_len)
    if read_len <= 0:
        # Iterator has finished, should never be here unless
        # we are reading the file incrementally
        if convert_categoricals:
            self._read_value_labels()
        self.close()
        raise StopIteration
    offset = self._lines_read * dtype.itemsize
    self.path_or_buf.seek(self.data_location + offset)
    read_lines = min(nrows, self.nobs - self._lines_read)
    data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
                         count=read_lines)

    self._lines_read += read_lines
    if self._lines_read == self.nobs:
        self._can_read_value_labels = True
        self._data_read = True
    # if necessary, swap the byte order to native here
    if self.byteorder != self._native_byteorder:
        data = data.byteswap().newbyteorder()

    if convert_categoricals:
        self._read_value_labels()

    if len(data) == 0:
        data = DataFrame(columns=self.varlist, index=index)
    else:
        data = DataFrame.from_records(data, index=index)
        data.columns = self.varlist

    # If index is not specified, use actual row number rather than
    # restarting at 0 for each chunk.
    if index is None:
        ix = np.arange(self._lines_read - read_lines, self._lines_read)
        data = data.set_index(ix)

    if columns is not None:
        try:
            data = self._do_select_columns(data, columns)
        except ValueError:
            self.close()
            raise

    # Decode strings
    for col, typ in zip(data, self.typlist):
        if type(typ) is int:
            data[col] = data[col].apply(
                self._null_terminate, convert_dtype=True)

    # Replace strL keys with their text from the GSO table.
    data = self._insert_strls(data)

    cols_ = np.where(self.dtyplist)[0]

    # Convert columns (if needed) to match input type
    index = data.index
    requires_type_conversion = False
    data_formatted = []
    for i in cols_:
        if self.dtyplist[i] is not None:
            col = data.columns[i]
            dtype = data[col].dtype

            if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
                requires_type_conversion = True
                data_formatted.append(
                    (col, Series(data[col], index, self.dtyplist[i])))
            else:
                data_formatted.append((col, data[col]))
    if requires_type_conversion:
        data = DataFrame.from_items(data_formatted)
    del data_formatted

    # Replace out-of-range sentinels with NaN or StataMissingValue.
    self._do_convert_missing(data, convert_missing)

    if convert_dates:
        cols = np.where(lmap(lambda x: x in _date_formats,
                             self.fmtlist))[0]
        for i in cols:
            col = data.columns[i]
            try:
                data[col] = _stata_elapsed_date_to_datetime_vec(
                    data[col],
                    self.fmtlist[i])
            except ValueError:
                self.close()
                raise

    if convert_categoricals and self.format_version > 108:
        data = self._do_convert_categoricals(data,
                                             self.value_label_dict,
                                             self.lbllist,
                                             order_categoricals)

    if not preserve_dtypes:
        # Upcast everything to float64/int64 for a uniform frame.
        retyped_data = []
        convert = False
        for col in data:
            dtype = data[col].dtype
            if dtype in (np.float16, np.float32):
                dtype = np.float64
                convert = True
            elif dtype in (np.int8, np.int16, np.int32):
                dtype = np.int64
                convert = True
            retyped_data.append((col, data[col].astype(dtype)))
        if convert:
            data = DataFrame.from_items(retyped_data)

    return data
def _do_convert_missing(self, data, convert_missing):
    """Replace out-of-range sentinel values in numeric columns, in
    place: with StataMissingValue objects when *convert_missing* is
    True, otherwise with NaN (upcasting to float as needed)."""
    # Check for missing values, and replace if found
    for i, colname in enumerate(data):
        fmt = self.typlist[i]
        if fmt not in self.VALID_RANGE:
            continue

        nmin, nmax = self.VALID_RANGE[fmt]
        series = data[colname]
        missing = np.logical_or(series < nmin, series > nmax)

        if not missing.any():
            continue

        if convert_missing:  # Replacement follows Stata notation
            missing_loc = np.argwhere(missing)
            umissing, umissing_loc = np.unique(series[missing],
                                               return_inverse=True)
            replacement = Series(series, dtype=np.object)
            for j, um in enumerate(umissing):
                missing_value = StataMissingValue(um)

                loc = missing_loc[umissing_loc == j]
                replacement.iloc[loc] = missing_value
        else:  # All replacements are identical
            dtype = series.dtype
            if dtype not in (np.float32, np.float64):
                dtype = np.float64
            replacement = Series(series, dtype=dtype)
            replacement[missing] = np.nan

        data[colname] = replacement
def _insert_strls(self, data):
    """Replace strL key columns (type 'Q') with their text looked up
    in the GSO table; no-op when no strLs were read."""
    if not hasattr(self, 'GSO') or len(self.GSO) == 0:
        return data
    for pos, col_typ in enumerate(self.typlist):
        if col_typ == 'Q':
            data.iloc[:, pos] = [self.GSO[key]
                                 for key in data.iloc[:, pos]]
    return data
def _do_select_columns(self, data, columns):
    """Restrict *data* to *columns*, and (on first use only) narrow the
    reader's per-variable metadata lists to match the selection."""
    if not self._column_selector_set:
        column_set = set(columns)
        if len(column_set) != len(columns):
            raise ValueError('columns contains duplicate entries')
        unmatched = column_set.difference(data.columns)
        if unmatched:
            raise ValueError('The following columns were not found in the '
                             'Stata data set: ' +
                             ', '.join(list(unmatched)))
        # Copy information for retained columns for later processing
        dtyplist = []
        typlist = []
        fmtlist = []
        lbllist = []
        for col in columns:
            i = data.columns.get_loc(col)
            dtyplist.append(self.dtyplist[i])
            typlist.append(self.typlist[i])
            fmtlist.append(self.fmtlist[i])
            lbllist.append(self.lbllist[i])

        self.dtyplist = dtyplist
        self.typlist = typlist
        self.fmtlist = fmtlist
        self.lbllist = lbllist
        self._column_selector_set = True

    return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
                             order_categoricals):
    """
    Converts categorical columns to Categorical type.

    Columns whose label name appears in *value_label_dict* have their
    category codes replaced by the label text; other columns pass
    through unchanged.
    """
    value_labels = list(compat.iterkeys(value_label_dict))
    cat_converted_data = []
    for col, label in zip(data, lbllist):
        if label in value_labels:
            # Explicit call with ordered=True
            cat_data = Categorical(data[col], ordered=order_categoricals)
            categories = []
            for category in cat_data.categories:
                if category in value_label_dict[label]:
                    categories.append(value_label_dict[label][category])
                else:
                    categories.append(category)  # Partially labeled
            try:
                cat_data.categories = categories
            except ValueError:
                # Duplicate labels make the categories invalid; report
                # which labels repeat.
                vc = Series(categories).value_counts()
                repeats = list(vc.index[vc > 1])
                # NOTE(review): precedence means the '-' rule is only
                # prepended, not placed between repeated labels.
                repeats = '\n' + '-' * 80 + '\n'.join(repeats)
                msg = 'Value labels for column {0} are not unique. The ' \
                      'repeated labels are:\n{1}'.format(col, repeats)
                raise ValueError(msg)
            # TODO: is the next line needed above in the data(...) method?
            cat_data = Series(cat_data, index=data.index)
            cat_converted_data.append((col, cat_data))
        else:
            cat_converted_data.append((col, data[col]))
    data = DataFrame.from_items(cat_converted_data)
    return data
def data_label(self):
    """Returns data label of Stata file"""
    # NOTE(review): the header parsers assign ``self.data_label`` as an
    # instance attribute (a string), which shadows this method on
    # instances -- attribute access returns the string directly.
    return self.data_label
def variable_labels(self):
    """Return a dict mapping each variable name to its variable label."""
    return {name: label
            for name, label in zip(self.varlist, self._variable_labels)}

def value_labels(self):
    """Return a dict mapping each label name to its {value: label}
    dict, reading the value-label table on first use."""
    if not self._value_labels_read:
        self._read_value_labels()
    return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
Takes a char string and pads it with null bytes until it's length chars
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
    """
    A class for writing Stata binary dta files

    Parameters
    ----------
    fname : str or buffer
        String path of file-like object
    data : DataFrame
        Input to save
    convert_dates : dict
        Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
        'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
        Datetime columns that do not have a conversion type specified will be
        converted to 'tc'. Raises NotImplementedError if a datetime column has
        timezone information
    write_index : bool
        Write the index to Stata dataset.
    encoding : str
        Default is latin-1. Unicode is not supported
    byteorder : str
        Can be ">", "<", "little", or "big". default is `sys.byteorder`
    time_stamp : datetime
        A datetime to use as file creation date.  Default is the current time
    data_label : str
        A label for the data set.  Must be 80 characters or smaller.
    variable_labels : dict
        Dictionary containing columns as keys and variable labels as values.
        Each label must be 80 characters or smaller.

        .. versionadded:: 0.19.0

    Returns
    -------
    writer : StataWriter instance
        The StataWriter instance has a write_file method, which will
        write the file to the given `fname`.

    Raises
    ------
    NotImplementedError
        * If datetimes contain timezone information
    ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
        * Column dtype is not representable in Stata
        * Column listed in convert_dates is not in DataFrame
        * Categorical label contains more than 32,000 characters

    Examples
    --------
    >>> import pandas as pd
    >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
    >>> writer = StataWriter('./data_file.dta', data)
    >>> writer.write_file()

    Or with dates

    >>> from datetime import datetime
    >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
    >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
    >>> writer.write_file()
    """

    def __init__(self, fname, data, convert_dates=None, write_index=True,
                 encoding="latin-1", byteorder=None, time_stamp=None,
                 data_label=None, variable_labels=None):
        super(StataWriter, self).__init__(encoding)
        self._convert_dates = {} if convert_dates is None else convert_dates
        self._write_index = write_index
        self._time_stamp = time_stamp
        self._data_label = data_label
        self._variable_labels = variable_labels
        # attach nobs, nvars, data, varlist, typlist
        self._prepare_pandas(data)

        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)
        self._fname = fname
        # Stata type code -> numpy type to downcast integer data to.
        self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
    """
    Helper to call encode before writing to file for Python 3 compat.
    """
    if compat.PY3:
        codec = self._encoding or self._default_encoding
        self._file.write(to_write.encode(codec))
    else:
        self._file.write(to_write)
def _prepare_categoricals(self, data):
    """Check for categorical columns, retain categorical information for
    Stata file and convert categorical data to int"""

    is_cat = [is_categorical_dtype(data[col]) for col in data]
    self._is_col_cat = is_cat
    self._value_labels = []
    if not any(is_cat):
        return data

    get_base_missing_value = StataMissingValue.get_base_missing_value
    index = data.index
    data_formatted = []
    for col, col_is_cat in zip(data, is_cat):
        if col_is_cat:
            self._value_labels.append(StataValueLabel(data[col]))
            dtype = data[col].cat.codes.dtype
            if dtype == np.int64:
                raise ValueError('It is not possible to export '
                                 'int64-based categorical data to Stata.')
            values = data[col].cat.codes.values.copy()

            # Upcast if needed so that correct missing values can be set
            if values.max() >= get_base_missing_value(dtype):
                if dtype == np.int8:
                    dtype = np.int16
                elif dtype == np.int16:
                    dtype = np.int32
                else:
                    dtype = np.float64
                values = np.array(values, dtype=dtype)

            # Replace missing values with Stata missing value for type
            values[values == -1] = get_base_missing_value(dtype)
            # NOTE(review): this appends a 3-tuple while the non-cat
            # branch appends a 2-tuple; confirm from_items accepts it.
            data_formatted.append((col, values, index))
        else:
            data_formatted.append((col, data[col]))
    return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
| gpl-3.0 |
francisc0garcia/autonomous_bicycle | test/test_csv_conversion.py | 1 | 2233 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pandas as pd
import glob, os
root = '/home/pach0/Documents/autonomous_bicycle/code/src/autonomous_bicycle/bags/csv/test_velocity_5/'
pattern = "*.csv"
text_common = 'test_velocity_5-bicycle-'
# get CSV files
files = glob.glob(root + pattern)
# define desired columns
list_columns = []
list_columns.append('time')
# IMU Data
list_columns.append('.orientation.x')
list_columns.append('.orientation.y')
list_columns.append('.orientation.z')
list_columns.append('.orientation.w')
list_columns.append('.angular_velocity.x')
list_columns.append('.angular_velocity.y')
list_columns.append('.angular_velocity.z')
list_columns.append('.linear_acceleration.x')
list_columns.append('.linear_acceleration.y')
list_columns.append('.linear_acceleration.z')
# GPS odometry Data
list_columns.append('.pose.pose.position.x')
list_columns.append('.pose.pose.position.y')
list_columns.append('.pose.pose.position.z')
# Real odometry Data
list_columns.append('.twist.twist.linear.x')
# Velocity GPS odometry Data
list_columns.append('.vector.x')
list_columns.append('.vector.y')
list_columns.append('.vector.z')
df_desired_columns = pd.DataFrame(list_columns)
# read all files
dfs = [pd.read_csv(fp, parse_dates=['time'])
.drop_duplicates(subset=['time'])
.set_index(['time'],
drop=False, verify_integrity=True) for fp in files]
#replace column name
for i in range(len(dfs)):
columns_file = dfs[i].columns.values.tolist()
df_filtered = pd.merge(pd.DataFrame(columns_file), df_desired_columns, how='inner')
dfs[i] = dfs[i].filter(items=list(df_filtered.values.flatten()))
names = dfs[i].columns.tolist()
name_column_base = files[i].replace(root, '').replace(text_common, '').replace('.csv', '')
print("{{" + str(names) + '}}')
for column in names:
if column != 'time':
names[names.index(column)] = name_column_base + column
dfs[i].columns = names
base_file = dfs[0]
for i in range(1, len(dfs)):
# merge datasets using outer join
base_file = pd.merge(base_file, dfs[i], how='outer', indicator=False, on=['time'])
print(base_file.shape)
#base_file.to_csv(root + 'result.csv') | apache-2.0 |
TaylorOshan/pysal | pysal/esda/geary.py | 5 | 8394 | """
Geary's C statistic for spatial autocorrelation
"""
__author__ = "Sergio J. Rey <srey@asu.edu> "
import numpy as np
import scipy.stats as stats
from .. import weights
from .tabular import _univariate_handler
__all__ = ['Geary']
class Geary(object):
"""
Global Geary C Autocorrelation statistic
Parameters
----------
y : array
(n, 1) attribute vector
w : W
spatial weights
transformation : {'B', 'R', 'D', 'U', 'V'}
weights transformation, default is binary.
Other options include "R": row-standardized, "D":
doubly-standardized, "U": untransformed (general
weights), "V": variance-stabilizing.
permutations : int
number of random permutations for calculation of
pseudo-p_values
Attributes
----------
y : array
original variable
w : W
spatial weights
permutations : int
number of permutations
C : float
value of statistic
EC : float
expected value
VC : float
variance of G under normality assumption
z_norm : float
z-statistic for C under normality assumption
z_rand : float
z-statistic for C under randomization assumption
p_norm : float
p-value under normality assumption (one-tailed)
p_rand : float
p-value under randomization assumption (one-tailed)
sim : array
(if permutations!=0)
vector of I values for permutated samples
p_sim : float
(if permutations!=0)
p-value based on permutations (one-tailed)
null: sptial randomness
alternative: the observed C is extreme
it is either extremely high or extremely low
EC_sim : float
(if permutations!=0)
average value of C from permutations
VC_sim : float
(if permutations!=0)
variance of C from permutations
seC_sim : float
(if permutations!=0)
standard deviation of C under permutations.
z_sim : float
(if permutations!=0)
standardized C based on permutations
p_z_sim : float
(if permutations!=0)
p-value based on standard normal approximation from
permutations (one-tailed)
Examples
--------
>>> import pysal
>>> w = pysal.open(pysal.examples.get_path("book.gal")).read()
>>> f = pysal.open(pysal.examples.get_path("book.txt"))
>>> y = np.array(f.by_col['y'])
>>> c = Geary(y,w,permutations=0)
>>> print round(c.C,7)
0.3330108
>>> print round(c.p_norm,7)
9.2e-05
>>>
"""
def __init__(self, y, w, transformation="r", permutations=999):
if not isinstance(w, weights.W):
raise TypeError('w must be a pysal weights object, got {}'
' instead'.format(type(w)))
y = np.asarray(y).flatten()
self.n = len(y)
self.y = y
w.transform = transformation
self.w = w
self.permutations = permutations
self.__moments()
xn = xrange(len(y))
self.xn = xn
self.y2 = y * y
yd = y - y.mean()
yss = sum(yd * yd)
self.den = yss * self.w.s0 * 2.0
self.C = self.__calc(y)
de = self.C - 1.0
self.EC = 1.0
self.z_norm = de / self.seC_norm
self.z_rand = de / self.seC_rand
if de > 0:
self.p_norm = 1 - stats.norm.cdf(self.z_norm)
self.p_rand = 1 - stats.norm.cdf(self.z_rand)
else:
self.p_norm = stats.norm.cdf(self.z_norm)
self.p_rand = stats.norm.cdf(self.z_rand)
if permutations:
sim = [self.__calc(np.random.permutation(self.y))
for i in xrange(permutations)]
self.sim = sim = np.array(sim)
above = sim >= self.C
larger = sum(above)
if (permutations - larger) < larger:
larger = permutations - larger
self.p_sim = (larger + 1.) / (permutations + 1.)
self.EC_sim = sum(sim) / permutations
self.seC_sim = np.array(sim).std()
self.VC_sim = self.seC_sim ** 2
self.z_sim = (self.C - self.EC_sim) / self.seC_sim
self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
@property
def _statistic(self):
""" a standardized accessor for esda statistics"""
return self.C
def __moments(self):
y = self.y
n = self.n
w = self.w
s0 = w.s0
s1 = w.s1
s2 = w.s2
s02 = s0 * s0
yd = y - y.mean()
k = (1 / (sum(yd ** 4)) * ((sum(yd ** 2)) ** 2))
vc_rand = (1 / (n * ((n - 2) ** 2) * s02)) * \
((((n - 1) * s1) * (n * n - 3 * n + 3 - (n - 1) * k))
- ((.25 * (n - 1) * s2) * (n * n + 3 * n - 6 -
(n * n - n + 2) * k))
+ (s02 * (n * n - 3 - ((n - 1) ** 2) * k)))
vc_norm = ((1 / (2 * (n + 1) * s02)) *
((2 * s1 + s2) * (n - 1) - 4 * s02))
self.VC_rand = vc_rand
self.VC_norm = vc_norm
self.seC_rand = vc_rand ** (0.5)
self.seC_norm = vc_norm ** (0.5)
def __calc(self, y):
ys = np.zeros(y.shape)
y2 = y ** 2
for i, i0 in enumerate(self.w.id_order):
neighbors = self.w.neighbor_offsets[i0]
wijs = self.w.weights[i0]
z = zip(neighbors, wijs)
ys[i] = sum([wij * (y2[i] - 2 * y[i] * y[j] + y2[j])
for j, wij in z])
a = (self.n - 1) * sum(ys)
return a / self.den
@classmethod
def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
"""
Function to compute a Geary statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, with default configurations,
the derived columns will be named like 'column_geary' and 'column_p_sim'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Geary statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Geary statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Geary statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Geary class in pysal.esda
"""
return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
outvals=outvals, stat=cls,
swapname=cls.__name__.lower(), **stat_kws)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/indexes/period/test_construction.py | 6 | 19404 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas.compat import lrange, PY3, text_type, lmap
from pandas import (Period, PeriodIndex, period_range, offsets, date_range,
Series, Index)
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
pytest.raises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
pytest.raises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
pytest.raises(ValueError, PeriodIndex, start=start, end=end)
pytest.raises(ValueError, PeriodIndex, start=start)
pytest.raises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
pytest.raises(ValueError, PeriodIndex, idx._values)
pytest.raises(ValueError, PeriodIndex, list(idx._values))
pytest.raises(TypeError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq, 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq, '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq, '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
pytest.raises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[M]'
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[3D]'
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
msg = 'specified freq and dtype are different'
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == 'M'
with tm.assert_raises_regex(ValueError, 'freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
# first element is pd.NaT
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')],
'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')]),
'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
def test_constructor_floats(self):
# GH13079
for floats in [[1.1, 2.1], np.array([1.1, 2.1])]:
with pytest.raises(TypeError):
pd.PeriodIndex._simple_new(floats, freq='M')
with pytest.raises(TypeError):
pd.PeriodIndex(floats, freq='M')
def test_constructor_nat(self):
pytest.raises(ValueError, period_range, start='NaT',
end='2011-01-01', freq='M')
pytest.raises(ValueError, period_range, start='2011-01-01',
end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
# must be the same, but for sure...
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with tm.assert_raises_regex(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
freq='25H')
for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]):
pidx = func(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert len(pi) == 365 * 9 + 2
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert len(pi) == 261 * 9
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert len(pi) == 365 * 24
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert len(pi) == 24 * 60
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert len(pi) == 24 * 60 * 60
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
msg = 'Start and end must have same freq'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start, end=end_intv)
msg = 'Must specify 2 of start, end, periods'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start)
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if PY3:
# unicode
types += text_type,
for t in types:
expected = Index(lmap(t, raw))
res = index.map(t)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, t) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod(object):
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_constructor_cant_cast_period(self):
with pytest.raises(TypeError):
Series(period_range('2000-01-01', periods=10, freq='D'),
dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range('1/1/2000', periods=10), dtype=object)
exp = Series(period_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
| mit |
MicrosoftGenomics/FaST-LMM | fastlmm/association/lrt.py | 1 | 13901 | import copy
import pdb
import scipy.linalg as LA
import scipy as SP
import numpy as NP
import logging as LG
import scipy.optimize as opt
import scipy.stats as ST
import scipy.special as SS
import os
import sys
from fastlmm.pyplink.plink import *
from pysnptools.util.pheno import *
from fastlmm.util.mingrid import *
from fastlmm.util.util import *
import fastlmm.util.stats as ss
import fastlmm.inference as inference
import fastlmm.association.score as score
import fastlmm.association as association
import statsmodels.api as sm
from sklearn import linear_model
class lrt(association.varcomp_test):
    """Likelihood-ratio test of a null variance-component model against an
    alternative model that additionally includes the tested SNPs.

    The null model is fit once in __init__ (fixed/mixed effect, linear/logistic
    link); testG then fits the alternative model for a given SNP set G1 and
    returns the LRT statistic and p-value. p-values use the conservative
    0.5*chi2(1) + 0.5*chi2(0) mixture: when the alternative collapses onto the
    null (alteqnull) the p-value is 1.0.
    """
    __slots__ = ["model0","model1","lrt","forcefullrank","nullModel","altModel","G0","K0","__testGcalled"]

    def __init__(self,Y,X=None,model0=None,appendbias=False,forcefullrank=False,
                 G0=None,K0=None,nullModel=None,altModel=None):
        """Store configuration and fit the null model.

        Y, X           : phenotype and covariates (handled by the base class)
        G0, K0         : optional background SNPs / kernel for two-kernel tests
        nullModel,
        altModel       : dicts with keys 'effect' ('fixed'|'mixed'),
                         'link' ('linear'|'logistic'), and optionally
                         'approx' and 'penalty'
        """
        association.varcomp_test.__init__(self,Y=Y,X=X,appendbias=appendbias)
        self.forcefullrank=forcefullrank
        self.nullModel = nullModel
        self.altModel = altModel
        self.G0=G0
        self.K0=K0
        self.__testGcalled=False
        # Default to an l2 penalty when none was requested.
        if ('penalty' not in nullModel) or nullModel['penalty'] is None:
            nullModel['penalty'] = 'l2'
        if nullModel['effect']=='fixed':
            if nullModel['link']=='linear':
                self._nullModelLinReg(G0)
            elif nullModel['link']=='logistic':
                self._nullModelLogReg(G0, nullModel['penalty'])
            else:
                assert False, 'Unknown link function.'
            assert ('approx' not in nullModel) or nullModel['approx'] is None, 'Cannot use approx with fixed effect'
        elif nullModel['effect']=='mixed':
            if nullModel['link']=='linear':
                self._nullModelMixedEffectLinear(G0=G0,K0=K0)
            else:
                self._nullModelMixedEffectNonLinear(G0, nullModel['approx'], nullModel['link'], nullModel['penalty'])
        else:
            assert False, 'Unknown effect type.'

    def _nullModelLogReg(self, G0, penalty='L2'):
        """Fit the null model by (unpenalized statsmodels) logistic regression."""
        assert G0 is None, 'Logistic regression cannot handle two kernels.'
        self.model0={}
        logreg_mod = sm.Logit(self.Y,self.X)
        #logreg_sk = linear_model.LogisticRegression(penalty=penalty)
        logreg_result = logreg_mod.fit(disp=0)
        self.model0['nLL']=logreg_result.llf
        self.model0['h2']=SP.nan #so that code for both one-kernel and two-kernel prints out
        self.model0['a2']=SP.nan

    def _nullModelLinReg(self, G0):
        """Fit the null model by ordinary linear regression."""
        assert G0 is None, 'Linear regression cannot handle two kernels.'
        self.model0={}
        model = ss.linreg(self.X,self.Y)
        self.model0['h2']=SP.nan #so that code for both one-kernel and two-kernel prints out
        self.model0['nLL']=model['nLL']

    def _nullModelMixedEffectLinear(self, G0=None,K0=None):
        """Fit the null linear mixed model (at most one kernel: G0/K0)."""
        lmm0 = inference.getLMM(forcefullrank = self.forcefullrank)
        if G0 is not None:
            lmm0.setG(G0=G0,K0=K0)
        lmm0.setX(self.X)
        lmm0.sety(self.Y)
        self.model0 = lmm0.findH2()# The null model only has a single kernel and only needs to find h2

    def _nullModelMixedEffectNonLinear(self, G0, approx, link, penalty):
        """Dispatch the non-linear (GLMM) null fit to the 1- or 2-kernel variant."""
        if G0 is None:
            return self._nullModelMixedEffectNonLinear1Kernel(approx, link, penalty)
        return self._nullModelMixedEffectNonLinear2Kernel(G0, approx, link, penalty)

    def _nullModelMixedEffectNonLinear1Kernel(self, approx, link, penalty):
        """GLMM null fit with no background kernel (h2 fixed at 0)."""
        if self.forcefullrank:
            assert False, "Not implemented yet."
        else:
            glmm0 = inference.getGLMM(approx, link, self.Y, None, None, penalty=penalty)
            glmm0.setX(self.X)
            glmm0.sety(self.Y)
            glmm0.optimize()
            self.model0 = {}
            self.model0['h2']=0.0
            self.model0['a2']=NP.nan
            self.model0['nLL']=-glmm0.marginal_loglikelihood()
            self.model0['sig02'] = glmm0.sig02
            self.model0['sig12'] = glmm0.sig12
            self.model0['sign2'] = glmm0.sign2
            for i in range(len(glmm0.beta)):
                self.model0['beta' + str(i)] = glmm0.beta[i]

    def _nullModelMixedEffectNonLinear2Kernel(self, G0, approx, link, penalty):
        """GLMM null fit with a background kernel G0; h2 is sig02/(sig02+sign2)."""
        if self.forcefullrank:
            assert False, "Not implemented yet."
        else:
            glmm0 = inference.getGLMM(approx, link, self.Y, G0, None, penalty=penalty)
            glmm0.setX(self.X)
            glmm0.setG(G0)
            glmm0.sety(self.Y)
            glmm0.optimize()
            self.model0 = {}
            # Guard against a vanishing total variance, where h2 is undefined.
            if glmm0.sig02 + glmm0.sign2 <= NP.sqrt(NP.finfo(NP.float).eps):
                h2 = NP.nan
            else:
                h2 = glmm0.sig02 / (glmm0.sig02 + glmm0.sign2)
            self.model0['h2']=h2
            self.model0['a2']=0.0
            self.model0['nLL']=-glmm0.marginal_loglikelihood()
            self.model0['sig02'] = glmm0.sig02
            self.model0['sig12'] = glmm0.sig12
            self.model0['sign2'] = glmm0.sign2
            for i in range(len(glmm0.beta)):
                self.model0['beta' + str(i)] = glmm0.beta[i]

    def testGupdate(self, y, X, type=None):
        '''
        Assume that testG has already been called (and therefore the
        expensive part of SVD related to the test SNPs), and that we are only changing
        the phenotype and covariates (e.g. for permutations).
        Recomputes the null model, and, crucially, cheaply, the alternative model
        '''
        assert self._testGcalled, "must have called testG before updateTestG which assumes only a change in y"
        origX=self.X
        origY=self.Y
        self._updateYX(y,X)
        #don't need this, as is invariant under permutations of only test SNPs or compliment
        #if self.nullModel['effect']=='fixed' and self.nullModel['link']=='linear':
        #    self._nullModelLinReg(self.G0)
        #else:
        #    raise Exception("not implemented")
        #compute the alternative likelihood
        if self.altModel['effect']=='fixed':
            raise Exception("not implemented")
        elif self.altModel['effect']=='mixed' and self.altModel["link"]=="linear":
            (lik1,stat,alteqnull) = self._altModelMixedEffectLinearUpdate(self.model1)
        else:
            raise Exception("not implemented")
        #due to optimization the alternative log-likelihood might be a about 1E-6 worse than the null log-likelihood
        pvreg = (ST.chi2.sf(stat,1.0)) #standard way to compute p-value when no boundary conditions
        if SP.isnan(pvreg) or pvreg>1.0:
            pvreg=1.0
        pv = 0.5*pvreg #conservative 50/50 estimate
        if alteqnull: pv=1.0 #chi_0 component
        test={
              'pv':pv,
              'stat':stat,
              'lik1':lik1,
              'lik0':self.model0,
              'alteqnull':alteqnull
              }
        self._updateYX(origY,origX)
        return test

    @property
    def _testGcalled(self):
        # True once testG has run (required before testGupdate).
        return self.__testGcalled

    def testG(self, G1, type=None,i_exclude=None,G_exclude=None):
        """
        Fit the alternative model including the test SNPs and return the LRT.

        Params:
            G1:         SNPs to be tested
            type:       Dummy
            i_exclude:  Dummy
            G_exclude:  Dummy
        Returns a dict with 'pv', 'stat', 'lik1', 'lik0', 'alteqnull'.
        """
        self.__testGcalled=True
        #compute the alternative likelihood
        if self.altModel['effect']=='fixed':
            if self.altModel['link']=='linear':
                (lik1,stat,alteqnull) = self._altModelLinReg(G1)
            elif self.altModel['link']=='logistic':
                assert False, 'Link function not implemented yet.'
            else:
                assert False, 'Unknown link function.'
            # BUGFIX: was "altModel.has_key(...)" which raised NameError; the dict is self.altModel.
            assert ('approx' not in self.altModel) or self.altModel['approx'] is None, 'Cannot use approx with fixed effect'
        elif self.altModel['effect']=='mixed':
            if self.altModel['link']=='linear':
                (lik1,stat,alteqnull) = self._altModelMixedEffectLinear(G1)
            else:
                (lik1,stat,alteqnull) = self._altModelMixedEffectNonLinear(G1, self.altModel['approx'],
                                                                           self.altModel['link'],
                                                                           self.altModel['penalty'])
        else:
            assert False, 'Unknown effect type.'
        #due to optimization the alternative log-likelihood might be a about 1E-6 worse than the null log-likelihood
        pvreg = (ST.chi2.sf(stat,1.0)) #standard way to compute p-value when no boundary conditions
        if SP.isnan(pvreg) or pvreg>1.0:
            pvreg=1.0
        pv = 0.5*pvreg #conservative 50/50 estimate
        if alteqnull: pv=1.0 #chi_0 component
        test={
              'pv':pv,
              'stat':stat,
              'lik1':lik1,
              'lik0':self.model0,
              'alteqnull':alteqnull
              }
        return test

    def _altModelLinReg(self, G1):
        assert False, 'Not implemented yet.'

    def _altModelMixedEffectLinearUpdate(self, lmm1, tol=0.0):
        '''
        Assumes that setG has been called already (expensive in many cases), and does not redo it.
        '''
        if self.G0 is not None:
            raise Exception("not implemented")
        else:
            lmm1.setX(self.X)
            lmm1.sety(self.Y)
            lik1 = lmm1.findH2()#The alternative model has one kernel and needs to find only h2
            alteqnull=lik1['h2']<=(0.0+tol)
        stat = 2.0*(self.model0['nLL'] - lik1['nLL'])
        self.model1=lmm1
        return (lik1,stat,alteqnull)

    def _altModelMixedEffectLinear(self, G1,tol=0.0):
        """Fit the alternative LMM with G1 (and G0 if present); alteqnull when the
        G1 variance component is estimated at the 0 boundary."""
        lmm1 = inference.getLMM(forcefullrank = self.forcefullrank)
        if self.G0 is not None:
            lmm1.setG(self.G0, G1)
            lmm1.setX(self.X)
            lmm1.sety(self.Y)
            lik1 = lmm1.findA2()#The alternative model has two kernels and needs to find both a2 and h2
            alteqnull=lik1['a2']<=(0.0+tol)
        else:
            lmm1.setG(G1)
            lmm1.setX(self.X)
            lmm1.sety(self.Y)
            lik1 = lmm1.findH2()#The alternative model has one kernel and needs to find only h2
            alteqnull=lik1['h2']<=(0.0+tol)
        stat = 2.0*(self.model0['nLL'] - lik1['nLL'])
        self.model1=lmm1
        return (lik1,stat,alteqnull)

    def _altModelMixedEffectNonLinear(self, G1, approx, link, penalty):
        """Fit the alternative GLMM; tiny statistics are snapped to the null."""
        if self.G0 is None:
            (lik1,stat) = self._altModelMixedEffectNonLinear1Kernel(G1, approx, link, penalty)
        else:
            (lik1,stat) = self._altModelMixedEffectNonLinear2Kernel(G1, approx, link, penalty)
        if stat < 1e-4:
            # Optimizer noise: treat the alternative as identical to the null.
            lik1['nLL'] = self.model0['nLL']
            lik1['h2'] = self.model0['h2']
            lik1['a2'] = self.model0['a2']
            stat = 0.0
            alteqnull = True
        else:
            alteqnull = False
        return (lik1,stat,alteqnull)

    def _altModelMixedEffectNonLinear1Kernel(self, G1, approx, link, penalty):
        """Alternative GLMM with only the test kernel G1."""
        if self.forcefullrank:
            assert False, 'Not working, call Danilo'
            assert False, "Not implemented yet."
        else:
            glmm1 = inference.getGLMM(approx, link, self.Y, G1, None, penalty=penalty)
            glmm1.setX(self.X)
            glmm1.sety(self.Y)
            glmm1.setG(G1)
            glmm1.optimize()
            assert glmm1.sig02 >= 0.0 and glmm1.sign2 >= 0
            if glmm1.sig02 + glmm1.sign2 <= NP.sqrt(NP.finfo(NP.float).eps):
                h2 = NP.nan
            else:
                h2 = glmm1.sig02 / (glmm1.sig02 + glmm1.sign2)
            a2 = NP.nan
            lik1 = {'nLL':-glmm1.marginal_loglikelihood(),
                    'h2':h2,
                    'a2':a2}
            lik1['sig02'] = glmm1.sig02
            lik1['sig12'] = glmm1.sig12
            lik1['sign2'] = glmm1.sign2
            for i in range(len(glmm1.beta)):
                lik1['beta' + str(i)] = glmm1.beta[i]
        stat = 2.0*(self.model0['nLL'] - lik1['nLL'])
        return (lik1,stat)

    def _altModelMixedEffectNonLinear2Kernel(self, G1, approx, link, penalty):
        """Alternative GLMM with background kernel G0 plus test kernel G1."""
        if self.forcefullrank:
            assert False, "Not implemented yet."
        else:
            glmm1 = inference.getGLMM(approx, link, self.Y, self.G0,
                                      G1, penalty=penalty)
            glmm1.setX(self.X)
            glmm1.sety(self.Y)
            glmm1.setG(self.G0, G1)
            glmm1.optimize()
            assert glmm1.sig02 >= 0.0 and glmm1.sig12 >= 0.0 and glmm1.sign2 >= 0
            if glmm1.sig02 + glmm1.sig12 + glmm1.sign2 <= NP.sqrt(NP.finfo(NP.float).eps):
                # in this case we don't have enough precision to calculate the
                # proportion between sig02+sig12 and the total or it does not make sense
                # because the covariance of the posterior tends to zero
                h2 = NP.nan
            else:
                h2 = (glmm1.sig02+glmm1.sig12) / (glmm1.sig02 + glmm1.sig12 + glmm1.sign2)
            if glmm1.sig02 + glmm1.sig12 <= NP.sqrt(NP.finfo(NP.float).eps):
                a2 = NP.nan
            else:
                a2 = glmm1.sig12 / (glmm1.sig02+glmm1.sig12)
            lik1 = {'nLL':-glmm1.marginal_loglikelihood(),
                    'h2':h2,
                    'a2':a2}
            lik1['sig02'] = glmm1.sig02
            lik1['sig12'] = glmm1.sig12
            lik1['sign2'] = glmm1.sign2
            for i in range(len(glmm1.beta)):
                lik1['beta' + str(i)] = glmm1.beta[i]
        stat = 2.0*(self.model0['nLL'] - lik1['nLL'])
        return (lik1,stat)
| apache-2.0 |
luiscape/fts-collector | ckan_loading/produce_csvs.py | 1 | 5186 | """
Can be used to produce the following CSV files for upload into CKAN:
- sectors.csv
- countries.csv
- organizations.csv
- emergencies.csv (for a given country)
- appeals.csv (for a given country)
- projects.csv (for a given country, based on appeals)
- contributions.csv (for given country, based on emergencies, which should capture all appeals, also)
"""
import fts_queries
import os
import pandas as pd
# TODO extract strings to header section above the code
def build_csv_path(base_path, object_type, country=None):
    """
    Return the upload path for a CSV: fts_<object_type>.csv, or
    fts_<country>_<object_type>.csv when a country is given. Filenames
    intentionally repeat the path structure so they sort clearly in CKAN.
    """
    middle = (country + '_') if country else ''
    return os.path.join(base_path, 'fts_' + middle + object_type + '.csv')
def write_dataframe_to_csv(dataframe, path):
    # Persist a dataframe for CKAN upload, announcing the destination on stdout.
    print "Writing", path
    # include the index which is an ID for each of the objects serialized by this script
    # use Unicode as many non-ASCII characters present in this data
    dataframe.to_csv(path, index=True, encoding='utf-8')
def filter_out_empty_dataframes(dataframes):
    """Return only the DataFrames that actually contain rows (drop the empty ones)."""
    return [df for df in dataframes if not df.empty]
def produce_sectors_csv(output_dir):
    """Fetch the FTS sector list and write it to fts_sectors.csv."""
    frame = fts_queries.fetch_sectors_json_as_dataframe()
    write_dataframe_to_csv(frame, build_csv_path(output_dir, 'sectors'))
def produce_countries_csv(output_dir):
    """Fetch the FTS country list and write it to fts_countries.csv."""
    frame = fts_queries.fetch_countries_json_as_dataframe()
    write_dataframe_to_csv(frame, build_csv_path(output_dir, 'countries'))
def produce_organizations_csv(output_dir):
    """Fetch the FTS organization list and write it to fts_organizations.csv."""
    frame = fts_queries.fetch_organizations_json_as_dataframe()
    write_dataframe_to_csv(frame, build_csv_path(output_dir, 'organizations'))
def produce_global_csvs(base_output_dir):
    """Write the country-independent CSVs under <base>/fts/global, creating the dir if needed."""
    # not sure if this directory creation code should be somewhere else..?
    output_dir = os.path.join(base_output_dir, 'fts', 'global')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for producer in (produce_sectors_csv, produce_countries_csv, produce_organizations_csv):
        producer(output_dir)
def produce_emergencies_csv_for_country(output_dir, country):
    """Write the given country's emergencies to fts_<country>_emergencies.csv."""
    frame = fts_queries.fetch_emergencies_json_for_country_as_dataframe(country)
    write_dataframe_to_csv(frame, build_csv_path(output_dir, 'emergencies', country=country))
def produce_appeals_csv_for_country(output_dir, country):
    """Write the given country's appeals to fts_<country>_appeals.csv."""
    frame = fts_queries.fetch_appeals_json_for_country_as_dataframe(country)
    write_dataframe_to_csv(frame, build_csv_path(output_dir, 'appeals', country=country))
def produce_projects_csv_for_country(output_dir, country):
    """Write one CSV holding every project attached to the country's appeals."""
    # Re-fetching the appeals duplicates a cheap query rather than threading state through.
    appeals = fts_queries.fetch_appeals_json_for_country_as_dataframe(country)
    # Pull the projects for each appeal, then stitch the non-empty results together.
    per_appeal = [fts_queries.fetch_projects_json_for_appeal_as_dataframe(appeal_id)
                  for appeal_id in appeals.index]
    combined = pd.concat(filter_out_empty_dataframes(per_appeal))
    write_dataframe_to_csv(combined, build_csv_path(output_dir, 'projects', country=country))
def produce_contributions_csv_for_country(output_dir, country):
    """Write one CSV of all contributions for the country's emergencies
    (emergencies should also capture all appeals)."""
    # Re-fetching the emergencies duplicates a cheap query rather than threading state through.
    emergencies = fts_queries.fetch_emergencies_json_for_country_as_dataframe(country)
    # Gather the contributions per emergency and stitch the non-empty results together.
    per_emergency = [fts_queries.fetch_contributions_json_for_emergency_as_dataframe(emergency_id)
                     for emergency_id in emergencies.index]
    combined = pd.concat(filter_out_empty_dataframes(per_emergency))
    write_dataframe_to_csv(combined, build_csv_path(output_dir, 'contributions', country=country))
def produce_csvs_for_country(base_output_dir, country):
    """Write all per-country CSVs under <base>/fts/per_country/<country>."""
    output_dir = os.path.join(base_output_dir, 'fts', 'per_country', country)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for producer in (produce_emergencies_csv_for_country,
                     produce_appeals_csv_for_country,
                     produce_projects_csv_for_country,
                     produce_contributions_csv_for_country):
        producer(output_dir, country)
if __name__ == "__main__":
    # output all CSVs for the given countries to '/tmp/'
    tmp_output_dir = '/tmp/'
    starter_countries = ['COL', 'SSD', 'YEM', 'PAK']  # the set of starter countries for DAP
    produce_global_csvs(tmp_output_dir)
    for code in starter_countries:
        produce_csvs_for_country(tmp_output_dir, code)
| unlicense |
pandas-ml/pandas-ml | pandas_ml/skaccessors/isotonic.py | 3 | 1174 | #!/usr/bin/env python
from pandas_ml.core.accessor import _AccessorMethods
class IsotonicMethods(_AccessorMethods):
    """
    Accessor to ``sklearn.isotonic``.
    """

    _module_name = 'sklearn.isotonic'

    @property
    def IsotonicRegression(self):
        """``sklearn.isotonic.IsotonicRegression``"""
        return self._module.IsotonicRegression

    def isotonic_regression(self, *args, **kwargs):
        """
        Call ``sklearn.isotonic.isotonic_regression`` using automatic mapping.

        - ``y``: ``ModelFrame.target``
        """
        target = self._target
        fitted = self._module.isotonic_regression(target.values, *args, **kwargs)
        # Re-wrap the raw array as a series aligned to the target's index.
        return self._constructor_sliced(fitted, index=target.index)

    def check_increasing(self, *args, **kwargs):
        """
        Call ``sklearn.isotonic.check_increasing`` using automatic mapping.

        - ``x``: ``ModelFrame.index``
        - ``y``: ``ModelFrame.target``
        """
        target = self._target
        return self._module.check_increasing(target.index, target.values, *args, **kwargs)
| bsd-3-clause |
guorendong/iridium-browser-ubuntu | native_client/buildbot/buildbot_pnacl.py | 2 | 10251 | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from buildbot_lib import (
BuildContext, BuildStatus, Command, ParseStandardCommandLine,
RemoveSconsBuildDirectories, RunBuild, SetupLinuxEnvironment,
SetupMacEnvironment, SetupWindowsEnvironment, SCons, Step )
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
def RunSconsTests(status, context):
  """Run the PNaCl SCons build-and-test sequence for the configured arch.

  Driven by context flags: 'skip_build' / 'skip_run' split the work between
  an x86 build host and ARM hardware testers (see the comment below).
  Each phase is wrapped in a Step so failures are reported per-stage.
  """
  # Clean out build directories, unless we have built elsewhere.
  if not context['skip_build']:
    with Step('clobber scons', status):
      RemoveSconsBuildDirectories()

  # Run checkdeps script to vet #includes.
  with Step('checkdeps', status):
    Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])

  arch = context['default_scons_platform']

  flags_subzero = ['use_sz=1']
  flags_build = ['do_not_run_tests=1']
  flags_run = []
  # This file is run 3 different ways for ARM builds. The qemu-only trybot does
  # a normal build-and-run with the emulator just like the x86 bots. The panda
  # build side runs on an x86 machines with skip_run, and then packs up the
  # result and triggers an ARM hardware tester that run with skip_build
  if arch != 'arm':
    # Unlike their arm counterparts we do not run trusted tests on x86 bots.
    # Trusted tests get plenty of coverage by other bots, e.g. nacl-gcc bots.
    # We make the assumption here that there are no "exotic tests" which
    # are trusted in nature but are somehow depedent on the untrusted TC.
    flags_build.append('skip_trusted_tests=1')
    flags_run.append('skip_trusted_tests=1')
  if context['skip_run']:
    flags_run.append('do_not_run_tests=1')
    if arch == 'arm':
      # For ARM hardware bots, force_emulator= disables use of QEMU, which
      # enables building tests which don't work under QEMU.
      flags_build.append('force_emulator=')
      flags_run.append('force_emulator=')
  if context['skip_build']:
    flags_run.extend(['naclsdk_validate=0', 'built_elsewhere=1'])

  if not context['skip_build']:
    # For ARM builders which will trigger hardware testers, run the hello world
    # test with the emulator as a basic sanity check before doing anything else.
    if arch == 'arm' and context['skip_run']:
      with Step('hello_world ' + arch, status):
        SCons(context, parallel=True, args=['run_hello_world_test'])
    with Step('build_all ' + arch, status):
      SCons(context, parallel=True, args=flags_build)
    if arch == 'x86-32':
      with Step('build_all subzero ' + arch, status):
        SCons(context, parallel=True, args=flags_build + flags_subzero)

  smoke_tests = ['small_tests', 'medium_tests']
  # Normal pexe-mode tests
  with Step('smoke_tests ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=True, args=flags_run + smoke_tests)
  # Large tests cannot be run in parallel
  with Step('large_tests ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=False, args=flags_run + ['large_tests'])

  # Run small_tests, medium_tests, and large_tests with Subzero.
  # TODO(stichnot): Move this to the sandboxed translator section
  # along with the translate_fast flag once pnacl-sz.nexe is ready.
  if arch == 'x86-32':
    # Normal pexe-mode tests
    with Step('smoke_tests subzero ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=True,
            args=flags_run + flags_subzero + smoke_tests)
    # Large tests cannot be run in parallel
    with Step('large_tests subzero ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=False,
            args=flags_run + flags_subzero + ['large_tests'])

  with Step('nonpexe_tests ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=True,
          args=flags_run + ['pnacl_generate_pexe=0', 'nonpexe_tests'])

  irt_mode = context['default_scons_mode'] + ['nacl_irt_test']
  # Build all the tests with the IRT
  if not context['skip_build']:
    with Step('build_all_irt ' + arch, status):
      SCons(context, parallel=True, mode=irt_mode, args=flags_build)
  smoke_tests_irt = ['small_tests_irt', 'medium_tests_irt']
  # Run tests with the IRT.
  with Step('smoke_tests_irt ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=True, mode=irt_mode,
          args=flags_run + smoke_tests_irt)
  with Step('large_tests_irt ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=False, mode=irt_mode,
          args=flags_run + ['large_tests_irt'])

  # Run some nacl_clang tests. Eventually we will have bots that just run
  # buildbot_standard with nacl_clang and this can be split out.
  context['pnacl'] = False
  context['nacl_clang'] = True
  if not context['skip_build']:
    with Step('build_nacl_clang ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=True, args=flags_build)
  with Step('smoke_tests_nacl_clang ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=True,
          args=flags_run + ['small_tests', 'medium_tests'])
  with Step('large_tests_nacl_clang ' + arch, status, halt_on_fail=False):
    SCons(context, parallel=False,
          args=flags_run + ['large_tests'])
  # Restore the context to PNaCl mode for the remaining phases.
  context['pnacl'] = True
  context['nacl_clang'] = False

  # Test sandboxed translation
  # TODO(dschuff): The standalone sandboxed translator driver does not have
  # the batch script wrappers, so it can't run on Windows. Either add them to
  # the translator package or make SCons use the pnacl_newlib drivers except
  # on the ARM bots where we don't have the pnacl_newlib drivers.
  # The mac standalone sandboxed translator is flaky.
  # https://code.google.com/p/nativeclient/issues/detail?id=3856
  if not context.Windows() and not context.Mac():
    flags_run_sbtc = ['use_sandboxed_translator=1']
    sbtc_tests = ['toolchain_tests_irt']
    if arch == 'arm':
      # When splitting the build from the run, translate_in_build_step forces
      # the translation to run on the run side (it usually runs on the build
      # side because that runs with more parallelism)
      if context['skip_build'] or context['skip_run']:
        flags_run_sbtc.append('translate_in_build_step=0')
      else:
        # The ARM sandboxed translator is flaky under qemu, so run a very small
        # set of tests on the qemu-only trybot.
        sbtc_tests = ['run_hello_world_test_irt']
    else:
      sbtc_tests.append('large_code')
    with Step('sandboxed_translator_tests ' + arch, status,
              halt_on_fail=False):
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run + flags_run_sbtc + sbtc_tests)
    with Step('sandboxed_translator_fast_tests ' + arch, status,
              halt_on_fail=False):
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run + flags_run_sbtc + ['translate_fast=1'] + sbtc_tests)

  # Test Non-SFI Mode.
  # The only architectures that the PNaCl toolchain supports Non-SFI
  # versions of are currently x86-32 and ARM.
  # The x86-64 toolchain bot currently also runs these tests from
  # buildbot_pnacl.sh
  if context.Linux() and (arch == 'x86-32' or arch == 'arm'):
    with Step('nonsfi_tests ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run +
                 ['nonsfi_nacl=1',
                  'nonsfi_tests',
                  'nonsfi_tests_irt'])

    # Build with pnacl_generate_pexe=0 to allow using pnacl-clang with
    # direct-to-native mode. This allows assembly to be used in tests.
    with Step('nonsfi_tests_nopnacl_generate_pexe ' + arch,
              status, halt_on_fail=False):
      extra_args = ['nonsfi_nacl=1',
                    'pnacl_generate_pexe=0',
                    'nonsfi_tests',
                    'nonsfi_tests_irt']
      # nonsfi_tests_irt with pnacl_generate_pexe=0 does not pass on x86-32.
      # https://code.google.com/p/nativeclient/issues/detail?id=4093
      if arch == 'x86-32':
        extra_args.remove('nonsfi_tests_irt')
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run + extra_args)

    # Test nonsfi_loader linked against host's libc.
    with Step('nonsfi_tests_host_libc ' + arch, status, halt_on_fail=False):
      # Using skip_nonstable_bitcode=1 here disables the tests for
      # zero-cost C++ exception handling, which don't pass for Non-SFI
      # mode yet because we don't build libgcc_eh for Non-SFI mode.
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run +
                 ['nonsfi_nacl=1', 'use_newlib_nonsfi_loader=0',
                  'nonsfi_tests', 'nonsfi_tests_irt',
                  'toolchain_tests_irt', 'skip_nonstable_bitcode=1'])

  # Test unsandboxed mode.
  if (context.Linux() or context.Mac()) and arch == 'x86-32':
    if context.Linux():
      tests = ['run_' + test + '_test_irt' for test in
               ['hello_world', 'irt_futex', 'thread', 'float',
                'malloc_realloc_calloc_free', 'dup', 'cond_timedwait',
                'getpid']]
    else:
      # TODO(mseaborn): Use the same test list as on Linux when the threading
      # tests pass for Mac.
      tests = ['run_hello_world_test_irt']
    with Step('unsandboxed_tests ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=True, mode=irt_mode,
            args=flags_run + ['pnacl_unsandboxed=1'] + tests)

  # Test MinSFI.
  if not context.Windows() and (arch == 'x86-32' or arch == 'x86-64'):
    with Step('minsfi_tests ' + arch, status, halt_on_fail=False):
      SCons(context, parallel=True,
            args=flags_run + ['minsfi=1', 'minsfi_tests'])
def Main():
  # type: () -> None
  """Configure the per-OS environment and run the SCons test sequence."""
  context = BuildContext()
  status = BuildStatus(context)
  ParseStandardCommandLine(context)
  platform_setups = (
      (context.Linux, SetupLinuxEnvironment),
      (context.Windows, SetupWindowsEnvironment),
      (context.Mac, SetupMacEnvironment),
  )
  for matches, setup in platform_setups:
    if matches():
      setup(context)
      break
  else:
    raise Exception('Unsupported platform')

  # Panda bots only have 2 cores.
  if pynacl.platform.GetArch() == 'arm':
    context['max_jobs'] = 2

  RunBuild(RunSconsTests, status)
# Script entry point when invoked directly by the buildbot.
if __name__ == '__main__':
  Main()
| bsd-3-clause |
csae1152/seizure-prediction | seizure_prediction/cross_validation/kfold_strategy.py | 3 | 4173 | import numpy as np
import sklearn
from seizure_prediction.cross_validation.sequences import collect_sequence_ranges_from_meta
class KFoldStrategy:
    """
    Create a k-fold strategy focused on preictal segments. The idea is to create a small number of folds
    that maximise coverage of the training set. Small number of folds as to keep performance in check.
    If there are 3 preictal sequences, then do 3 folds of (0,1), (0,2), (1,2). If there are 6 sequences,
    do 3 folds (0,1), (2,3), (4,5). The sequences are shuffled before being allocated to folds.

    However, interictal sequences are partitioned randomly as there are a lot more of them that random
    should more or less be fine.
    """

    def get_name(self):
        # Identifier used when naming this cross-validation strategy.
        return 'kfold'

    def get_folds(self, preictal_meta):
        """
        :param preictal_meta: metadata from preictal segments
        :return: iterable of fold numbers to pass to split_train_cv
        """
        num_seqs = len(collect_sequence_ranges_from_meta(preictal_meta))
        assert num_seqs >= 2
        if num_seqs <= 2:
            num_folds = 2
        elif num_seqs <= 6:
            num_folds = 3
        else:
            # NOTE: Python 2 integer division; under Python 3 this would be a
            # float (code is Python 2, cf. xrange below).
            num_folds = num_seqs / 2
        return xrange(num_folds)

    def get_sequence_ranges(self, meta, fold_number, interictal=False, shuffle=True):
        # Split the (shuffled) sequence ranges of `meta` into train/cv groups
        # for the given fold. Preictal folds are built from precomputed index
        # combinations; interictal folds use a random train/test split.
        seq_ranges = collect_sequence_ranges_from_meta(meta, shuffle=shuffle)
        num_seqs = len(seq_ranges)

        # calculate the split numbers for a fold
        def get_num_train_seqs(num_seqs):
            if num_seqs <= 3:
                return 2
            else:
                return 3

        if interictal:
            # With few sequences keep most for training; with many, 40% suffices.
            interictal_ratio = 0.8 if num_seqs <= 20 else 0.4
            # fold_number doubles as the random seed so each fold differs deterministically.
            train_ranges, cv_ranges = sklearn.cross_validation.train_test_split(seq_ranges, train_size=interictal_ratio, random_state=fold_number)
        else:
            train_size = get_num_train_seqs(num_seqs)
            if num_seqs == 3:
                combinations = [[0, 1], [0, 2], [1, 2]]
            else:
                # Contiguous groups of train_size indices.
                first_pass = [range(i, i + train_size) for i in range(0, num_seqs, train_size) if (i + train_size) <= num_seqs]
                remainder = num_seqs % train_size
                if remainder == 0:
                    gap = []
                else:
                    # Pad the leftover tail with early indices to reach train_size.
                    seq = range(num_seqs - remainder, num_seqs)
                    needed = train_size - remainder
                    gap_fillers = [i * train_size for i in range(needed)]
                    gap_fillers = [x for x in gap_fillers if x < num_seqs]
                    # print 'gf', gap_fillers
                    if len(gap_fillers) < train_size:
                        gap_fillers = [i * (train_size-1) for i in range(needed)]
                        gap_fillers = [x for x in gap_fillers if x < num_seqs]
                    gap = [gap_fillers + seq]
                # Strided groups (step train_size).
                second_pass = [range(i, i + train_size**2, train_size) for i in range(num_seqs)]
                # NOTE(review): `x < num_seqs` compares a list to an int; under
                # Python 2 ordering that is always False (numbers sort before
                # lists), so second_pass is filtered to empty. Presumably
                # `x[-1] < num_seqs` was intended — confirm before relying on it.
                second_pass = [x for x in second_pass if len(x) == train_size and x < num_seqs]
                # Contiguous groups offset by one from first_pass.
                third_pass = [range(i, i + train_size) for i in range(1, num_seqs, train_size) if (i + train_size) <= num_seqs]
                # third_pass = [range(i, i + train_size) for i in range(2, num_seqs, train_size) if (i + train_size) < num_seqs]
                combinations = first_pass + gap + second_pass + third_pass
            indices = combinations[fold_number]
            # print 'indices', indices
            train_ranges = [seq_ranges[i] for i in indices]
            cv_ranges = np.delete(seq_ranges, indices, axis=0)

        return train_ranges, cv_ranges

    def split_train_cv(self, data, meta, fold_number, interictal=False):
        # Materialize the train/cv row slices of `data` selected by
        # get_sequence_ranges into two concatenated arrays.
        train_ranges, cv_ranges = self.get_sequence_ranges(meta, fold_number, interictal)

        train_data = []
        for start, end in train_ranges:
            train_data.append(data[start:end])
        train_data = np.concatenate(train_data, axis=0)

        cv_data = []
        for start, end in cv_ranges:
            cv_data.append(data[start:end])
        cv_data = np.concatenate(cv_data, axis=0)

        return train_data, cv_data
| mit |
PatrickOReilly/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
ithemal/Ithemal | learning/pytorch/ithemal/run_ithemal.py | 1 | 14450 | import sys
import os
sys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))
import argparse
import time
import torch
import torch.multiprocessing as mp
torch.backends.cudnn.enabled = False
from utils import messages
import models.losses as ls
import models.train as tr
from tqdm import tqdm
from mpconfig import MPConfig
from typing import Callable, List, Optional, Iterator, Tuple, NamedTuple, Union
import random
import Queue
from ithemal_utils import *
import training
import pandas as pd
import common_libs.utilities as ut
def graph_model_benchmark(base_params, benchmark_params):
    # type: (BaseParameters, BenchmarkParameters) -> None
    """Measure training throughput: fan the dataset out over several trainer
    processes sharing one model and report wall-clock time for the run."""
    data = load_data(base_params)
    model = load_model(base_params, data)

    train = tr.Train(
        model, data, tr.PredictionType.REGRESSION, ls.mse_loss, 1,
        batch_size=benchmark_params.batch_size, clip=None, opt=tr.OptimizerType.ADAM_PRIVATE, lr=0.01,
    )

    # Share model parameters across the worker processes (Hogwild-style updates).
    model.share_memory()

    mp_config = MPConfig(benchmark_params.threads)
    # Each trainer gets an equal contiguous slice of the examples.
    partition_size = benchmark_params.examples // benchmark_params.trainers

    processes = []

    start_time = time.time()

    with mp_config:
        for rank in range(benchmark_params.trainers):
            mp_config.set_env(rank)

            partition = (rank * partition_size, (rank + 1) * partition_size)

            p = mp.Process(target=train, args=(rank, partition))
            # Daemonize so stray workers die with the parent.
            p.daemon = True
            p.start()
            processes.append(p)

    for p in processes:
        p.join()

    end_time = time.time()

    print('Time to process {} examples: {} seconds'.format(
        benchmark_params.examples,
        end_time - start_time,
    ))
def graph_model_validate(base_params, model_file, iaca_only):
    # type: (BaseParameters, str, bool) -> None
    """Validate a saved model over the test split of the dataset.

    When ``iaca_only`` is set, the test split is first restricted to
    datapoints whose timings were measured with IACA (looked up in the
    ``times`` table of the project database).
    """
    data = load_data(base_params)
    if iaca_only:
        cnx = ut.create_connection()
        iaca_code_ids = set(
            pd.read_sql('SELECT time_id, code_id FROM times WHERE kind="iaca"', cnx)
            .set_index('time_id')
            .code_id
        )
        data.test = [item for item in data.test if item.code_id in iaca_code_ids]
    model = load_model(base_params, data)
    trainer = tr.Train(
        model, data, tr.PredictionType.REGRESSION, ls.mse_loss, 1,
        batch_size=1000, clip=None, predict_log=base_params.predict_log,
    )
    resultfile = os.environ['ITHEMAL_HOME'] + '/learning/pytorch/results/realtime_results.txt'
    (actual, predicted) = trainer.validate(resultfile=resultfile, loadfile=model_file)
def graph_model_dump(base_params, model_file):
    # type: (BaseParameters, str) -> None
    """Load the dataset and model described by ``base_params`` and serialize
    both into ``model_file``."""
    dataset = load_data(base_params)
    dump_model_and_data(load_model(base_params, dataset), dataset, model_file)
def main():
    # type: () -> None
    """Command-line entry point: build the argument parser and dispatch to
    the train / validate / dump / benchmark sub-commands."""
    parser = argparse.ArgumentParser()
    # data arguments
    parser.add_argument('--data', required=True, help='The data file to load from')
    parser.add_argument('--embed-mode', help='The embedding mode to use (default: none)', default='none')
    parser.add_argument('--embed-file', help='The embedding file to use (default: code_delim.emb)',
                        default=os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch', 'inputs', 'embeddings', 'code_delim.emb'))
    parser.add_argument('--embed-size', help='The size of embedding to use (default: 256)', default=256, type=int)
    parser.add_argument('--hidden-size', help='The size of hidden layer to use (default: 256)', default=256, type=int)
    parser.add_argument('--no-mem', help='Remove all instructions with memory', default=False, action='store_true')
    # edge/misc arguments
    parser.add_argument('--random-edge-freq', type=float, default=0.0, help='The fraction of instructions to add an additional random forward edge to (can be >1)')
    parser.add_argument('--no-residual', default=False, action='store_true', help='Don\'t use a residual model in Ithemal')
    parser.add_argument('--no-dag-rnn', default=False, action='store_true', help='Don\'t use the DAG-RNN model in Ithemal')
    parser.add_argument('--predict-log', action='store_true', default=False, help='Predict the log of the time')
    parser.add_argument('--linear-embeddings', action='store_true', default=False, help='Use linear embeddings instead of LSTM')
    parser.add_argument('--use-rnn', action='store_true', default=False)
    # RNN cell type (mutually exclusive; defaults to LSTM below)
    rnn_type_group = parser.add_mutually_exclusive_group()
    rnn_type_group.add_argument('--rnn-normal', action='store_const', const=md.RnnType.RNN, dest='rnn_type')
    rnn_type_group.add_argument('--rnn-lstm', action='store_const', const=md.RnnType.LSTM, dest='rnn_type')
    rnn_type_group.add_argument('--rnn-gru', action='store_const', const=md.RnnType.GRU, dest='rnn_type')
    parser.set_defaults(rnn_type=md.RnnType.LSTM)
    # RNN hierarchy structure (mutually exclusive; defaults to MULTISCALE below)
    rnn_hierarchy_type_group = parser.add_mutually_exclusive_group()
    rnn_hierarchy_type_group.add_argument('--rnn-token', action='store_const', const=md.RnnHierarchyType.NONE, dest='rnn_hierarchy_type')
    rnn_hierarchy_type_group.add_argument('--rnn-dense', action='store_const', const=md.RnnHierarchyType.DENSE, dest='rnn_hierarchy_type')
    rnn_hierarchy_type_group.add_argument('--rnn-multiscale', action='store_const', const=md.RnnHierarchyType.MULTISCALE, dest='rnn_hierarchy_type')
    rnn_hierarchy_type_group.add_argument('--rnn-linear-model', action='store_const', const=md.RnnHierarchyType.LINEAR_MODEL, dest='rnn_hierarchy_type')
    rnn_hierarchy_type_group.add_argument('--rnn-mop', action='store_const', const=md.RnnHierarchyType.MOP_MODEL, dest='rnn_hierarchy_type')
    parser.set_defaults(rnn_hierarchy_type=md.RnnHierarchyType.MULTISCALE)
    parser.add_argument('--rnn-skip-connections', action='store_true', default=False)
    parser.add_argument('--rnn-learn-init', action='store_true', default=False)
    parser.add_argument('--rnn-connect-tokens', action='store_true', default=False)
    # final DAG nonlinearity (mutually exclusive; default None = no nonlinearity choice)
    dag_nonlinearity_group = parser.add_mutually_exclusive_group()
    dag_nonlinearity_group.add_argument('--dag-relu-nonlinearity', action='store_const', const=md.NonlinearityType.RELU, dest='dag_nonlinearity')
    dag_nonlinearity_group.add_argument('--dag-tanh-nonlinearity', action='store_const', const=md.NonlinearityType.TANH, dest='dag_nonlinearity')
    dag_nonlinearity_group.add_argument('--dag-sigmoid-nonlinearity', action='store_const', const=md.NonlinearityType.SIGMOID, dest='dag_nonlinearity')
    parser.set_defaults(dag_nonlinearity=None)
    parser.add_argument('--dag-nonlinearity-width', help='The width of the final nonlinearity (default: 128)', default=128, type=int)
    parser.add_argument('--dag-nonlinear-before-max', action='store_true', default=False)
    data_dependency_group = parser.add_mutually_exclusive_group()
    data_dependency_group.add_argument('--linear-dependencies', action='store_true', default=False)
    data_dependency_group.add_argument('--flat-dependencies', action='store_true', default=False)
    # DAG reduction operator (mutually exclusive; defaults to MAX below)
    dag_reduction_group = parser.add_mutually_exclusive_group()
    dag_reduction_group.add_argument('--dag-add-reduction', action='store_const', const=md.ReductionType.ADD, dest='dag_reduction')
    dag_reduction_group.add_argument('--dag-max-reduction', action='store_const', const=md.ReductionType.MAX, dest='dag_reduction')
    dag_reduction_group.add_argument('--dag-mean-reduction', action='store_const', const=md.ReductionType.MEAN, dest='dag_reduction')
    dag_reduction_group.add_argument('--dag-attention-reduction', action='store_const', const=md.ReductionType.ATTENTION, dest='dag_reduction')
    parser.set_defaults(dag_reduction=md.ReductionType.MAX)
    def add_edge_ablation(ablation):
        # type: (EdgeAblationType) -> None
        # each ablation flag appends to the same 'edge_ablations' list
        parser.add_argument('--{}'.format(ablation.value), action='append_const', dest='edge_ablations', const=ablation)
    add_edge_ablation(EdgeAblationType.TRANSITIVE_REDUCTION)
    add_edge_ablation(EdgeAblationType.TRANSITIVE_CLOSURE)
    add_edge_ablation(EdgeAblationType.ADD_LINEAR_EDGES)
    add_edge_ablation(EdgeAblationType.ONLY_LINEAR_EDGES)
    add_edge_ablation(EdgeAblationType.NO_EDGES)
    # sub-commands: train / benchmark / validate / dump
    sp = parser.add_subparsers(dest='subparser')
    train = sp.add_parser('train', help='Train an ithemal model')
    train.add_argument('--experiment-name', required=True, help='Name of the experiment to run')
    train.add_argument('--experiment-time', required=True, help='Time the experiment was started at')
    train.add_argument('--load-file', help='Start by loading the provided model')
    train.add_argument('--batch-size', type=int, default=4, help='The batch size to use in train')
    train.add_argument('--epochs', type=int, default=3, help='Number of epochs to run for')
    train.add_argument('--trainers', type=int, default=4, help='Number of trainer processes to use')
    train.add_argument('--threads', type=int, default=4, help='Total number of PyTorch threads to create per trainer')
    train.add_argument('--decay-trainers', action='store_true', default=False, help='Decay the number of trainers at the end of each epoch')
    train.add_argument('--weight-decay', type=float, default=0, help='Coefficient of weight decay (L2 regularization) on model')
    train.add_argument('--initial-lr', type=float, default=0.1, help='Initial learning rate')
    train.add_argument('--decay-lr', action='store_true', default=False, help='Decay the learning rate at the end of each epoch')
    train.add_argument('--momentum', type=float, default=0.9, help='Momentum parameter for SGD')
    train.add_argument('--nesterov', action='store_true', default=False, help='Use Nesterov momentum')
    train.add_argument('--weird-lr', action='store_true', default=False, help='Use unusual LR schedule')
    train.add_argument('--lr-decay-rate', default=1.2, help='LR division rate', type=float)
    split_group = train.add_mutually_exclusive_group()
    split_group.add_argument(
        '--split-dist', action='store_const', const=[0.5, 0.25, 0.125, .0625, .0625],
        help='Split data partitions between trainers via a distribution',
    )
    split_group.add_argument('--split-size', type=int, help='Partitions of a fixed size')
    optimizer_group = train.add_mutually_exclusive_group()
    optimizer_group.add_argument('--adam-private', action='store_const', const=tr.OptimizerType.ADAM_PRIVATE, dest='optimizer', help='Use Adam with private moments',
                                 default=tr.OptimizerType.ADAM_PRIVATE)
    optimizer_group.add_argument('--adam-shared', action='store_const', const=tr.OptimizerType.ADAM_SHARED, dest='optimizer', help='Use Adam with shared moments')
    optimizer_group.add_argument('--sgd', action='store_const', const=tr.OptimizerType.SGD, dest='optimizer', help='Use SGD')
    benchmark = sp.add_parser('benchmark', help='Benchmark train performance of an Ithemal setup')
    benchmark.add_argument('--n-examples', type=int, default=1000, help='Number of examples to use in benchmark')
    benchmark.add_argument('--trainers', type=int, default=4, help='Number of trainer processes to use')
    benchmark.add_argument('--threads', type=int, default=4, help='Total number of PyTorch threads to create per trainer')
    benchmark.add_argument('--batch-size', type=int, default=4, help='The batch size to use in train')
    validate = sp.add_parser('validate', help='Get performance of a dataset')
    validate.add_argument('--load-file', help='File to load the model from')
    validate.add_argument('--iaca-only', help='Only report accuracy on IACA datapoints', action='store_true', default=False)
    dump = sp.add_parser('dump', help='Dump the dataset to a file')
    dump.add_argument('--dump-file', help='File to dump the model to', required=True)
    args = parser.parse_args()
    # parameters shared by all sub-commands
    base_params = BaseParameters(
        data=args.data,
        embed_mode=args.embed_mode,
        embed_file=args.embed_file,
        random_edge_freq=args.random_edge_freq,
        predict_log=args.predict_log,
        no_residual=args.no_residual,
        no_dag_rnn=args.no_dag_rnn,
        dag_reduction=args.dag_reduction,
        # edge_ablations is None when no ablation flag was passed
        edge_ablation_types=args.edge_ablations or [],
        embed_size=args.embed_size,
        hidden_size=args.hidden_size,
        linear_embeddings=args.linear_embeddings,
        use_rnn=args.use_rnn,
        rnn_type=args.rnn_type,
        rnn_hierarchy_type=args.rnn_hierarchy_type,
        rnn_connect_tokens=args.rnn_connect_tokens,
        rnn_skip_connections=args.rnn_skip_connections,
        rnn_learn_init=args.rnn_learn_init,
        no_mem=args.no_mem,
        linear_dependencies=args.linear_dependencies,
        flat_dependencies=args.flat_dependencies,
        dag_nonlinearity=args.dag_nonlinearity,
        dag_nonlinearity_width=args.dag_nonlinearity_width,
        dag_nonlinear_before_max=args.dag_nonlinear_before_max,
    )
    if args.subparser == 'train':
        if args.split_dist:
            split = args.split_dist
        else:
            # fall back to fixed-size partitions of 1000 when neither
            # --split-dist nor --split-size was given
            split = args.split_size or 1000
        train_params = TrainParameters(
            experiment_name=args.experiment_name,
            experiment_time=args.experiment_time,
            load_file=args.load_file,
            batch_size=args.batch_size,
            trainers=args.trainers,
            threads=args.threads,
            decay_trainers=args.decay_trainers,
            weight_decay=args.weight_decay,
            initial_lr=args.initial_lr,
            decay_lr=args.decay_lr,
            epochs=args.epochs,
            split=split,
            optimizer=args.optimizer,
            momentum=args.momentum,
            nesterov=args.nesterov,
            weird_lr=args.weird_lr,
            lr_decay_rate=args.lr_decay_rate,
        )
        training.run_training_coordinator(base_params, train_params)
    elif args.subparser == 'validate':
        graph_model_validate(base_params, args.load_file, args.iaca_only)
    elif args.subparser == 'dump':
        graph_model_dump(base_params, args.dump_file)
    elif args.subparser == 'benchmark':
        benchmark_params = BenchmarkParameters(
            batch_size=args.batch_size,
            trainers=args.trainers,
            threads=args.threads,
            examples=args.n_examples,
        )
        graph_model_benchmark(base_params, benchmark_params)
    else:
        raise ValueError('Unknown mode "{}"'.format(args.subparser))
if __name__ == '__main__':
main()
| mit |
pratapvardhan/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# fixed random_state so the example is reproducible
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
# toy dataset: 4 samples, 2 features, 2 classes
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
# soft voting: the random forest's probabilities are weighted 5x
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3 (last slot zeroed; drawn separately below)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
remenska/rootpy | rootpy/plotting/root2matplotlib.py | 1 | 28571 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
This module provides functions that allow the plotting of ROOT histograms and
graphs with `matplotlib <http://matplotlib.org/>`_.
If you just want to save image files and don't want matplotlib to attempt to
create a graphical window, tell matplotlib to use a non-interactive backend
such as ``Agg`` when importing it for the first time (i.e. before importing
rootpy.plotting.root2matplotlib)::
import matplotlib
matplotlib.use('Agg') # do this before importing pyplot or root2matplotlib
This puts matplotlib in a batch state similar to ``ROOT.gROOT.SetBatch(True)``.
"""
from __future__ import absolute_import
# trigger ROOT's finalSetup (GUI thread) before matplotlib's
import ROOT
ROOT.kTRUE
from math import sqrt
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
import matplotlib.pyplot as plt
import numpy as np
from ..extern.six.moves import range
from .hist import _Hist
from .graph import _Graph1DBase
from .utils import get_limits
# Names exported by ``from rootpy.plotting.root2matplotlib import *``.
__all__ = [
    'hist',
    'bar',
    'errorbar',
    'fill_between',
    'step',
    'hist2d',
    'imshow',
    'contour',
]
def _set_defaults(obj, kwargs, types=['common']):
defaults = {}
for key in types:
if key == 'common':
defaults['label'] = obj.GetTitle()
defaults['visible'] = getattr(obj, 'visible', True)
defaults['alpha'] = getattr(obj, 'alpha', None)
elif key == 'line':
defaults['linestyle'] = obj.GetLineStyle('mpl')
defaults['linewidth'] = obj.GetLineWidth()
elif key == 'fill':
defaults['edgecolor'] = kwargs.get('color', obj.GetLineColor('mpl'))
defaults['facecolor'] = kwargs.get('color', obj.GetFillColor('mpl'))
root_fillstyle = obj.GetFillStyle('root')
if root_fillstyle == 0:
if not kwargs.get('fill'):
defaults['facecolor'] = 'none'
defaults['fill'] = False
elif root_fillstyle == 1001:
defaults['fill'] = True
else:
defaults['hatch'] = obj.GetFillStyle('mpl')
defaults['facecolor'] = 'none'
elif key == 'marker':
defaults['marker'] = obj.GetMarkerStyle('mpl')
defaults['markersize'] = obj.GetMarkerSize() * 5
defaults['markeredgecolor'] = obj.GetMarkerColor('mpl')
defaults['markerfacecolor'] = obj.GetMarkerColor('mpl')
elif key == 'errors':
defaults['ecolor'] = obj.GetLineColor('mpl')
elif key == 'errorbar':
defaults['fmt'] = obj.GetMarkerStyle('mpl')
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
def _set_bounds(h,
                axes=None,
                was_empty=True,
                prev_xlim=None,
                prev_ylim=None,
                xpadding=0,
                ypadding=.1,
                xerror_in_padding=True,
                yerror_in_padding=True,
                snap=True,
                logx=None,
                logy=None):
    """Adjust the x/y limits of *axes* to accommodate the plottable *h*.

    If *was_empty*, the limits are set directly from *h*; otherwise the new
    limits are merged with the previous limits (*prev_xlim*/*prev_ylim*) so
    existing content stays visible.  *prev_xlim*/*prev_ylim* default to the
    current pyplot limits when not supplied.
    """
    if axes is None:
        axes = plt.gca()
    if prev_xlim is None:
        # NOTE: falls back to the *current pyplot* axes, which may differ
        # from the `axes` argument — callers should pass prev_xlim explicitly
        prev_xlim = plt.xlim()
    if prev_ylim is None:
        prev_ylim = plt.ylim()
    if logx is None:
        logx = axes.get_xscale() == 'log'
    if logy is None:
        logy = axes.get_yscale() == 'log'
    xmin, xmax, ymin, ymax = get_limits(
        h,
        xpadding=xpadding,
        ypadding=ypadding,
        xerror_in_padding=xerror_in_padding,
        yerror_in_padding=yerror_in_padding,
        snap=snap,
        logx=logx,
        logy=logy)
    if was_empty:
        axes.set_xlim([xmin, xmax])
        axes.set_ylim([ymin, ymax])
    else:
        # merge with existing limits; previous non-positive minima are
        # discarded on log axes since they cannot be displayed
        prev_xmin, prev_xmax = prev_xlim
        if logx and prev_xmin <= 0:
            axes.set_xlim([xmin, max(prev_xmax, xmax)])
        else:
            axes.set_xlim([min(prev_xmin, xmin), max(prev_xmax, xmax)])
        prev_ymin, prev_ymax = prev_ylim
        if logy and prev_ymin <= 0:
            axes.set_ylim([ymin, max(prev_ymax, ymax)])
        else:
            axes.set_ylim([min(prev_ymin, ymin), max(prev_ymax, ymax)])
def _get_highest_zorder(axes):
return max([c.get_zorder() for c in axes.get_children()])
def _maybe_reversed(x, reverse=False):
if reverse:
return reversed(x)
return x
def hist(hists,
         stacked=True,
         reverse=False,
         xpadding=0, ypadding=.1,
         yerror_in_padding=True,
         logy=None,
         snap=True,
         axes=None,
         **kwargs):
    """
    Make a matplotlib hist plot from a ROOT histogram, stack or
    list of histograms.
    Parameters
    ----------
    hists : Hist, list of Hist, HistStack
        The histogram(s) to be plotted
    stacked : bool, optional (default=True)
        If True then stack the histograms with the first histogram on the
        bottom, otherwise overlay them with the first histogram in the
        background.
    reverse : bool, optional (default=False)
        If True then reverse the order of the stack or overlay.
    xpadding : float or 2-tuple of floats, optional (default=0)
        Padding to add on the left and right sides of the plot as a fraction of
        the axes width after the padding has been added. Specify unique left
        and right padding with a 2-tuple.
    ypadding : float or 2-tuple of floats, optional (default=.1)
        Padding to add on the top and bottom of the plot as a fraction of
        the axes height after the padding has been added. Specify unique top
        and bottom padding with a 2-tuple.
    yerror_in_padding : bool, optional (default=True)
        If True then make the padding inclusive of the y errors otherwise
        only pad around the y values.
    logy : bool, optional (default=None)
        Apply special treatment of a log-scale y-axis to display the histogram
        correctly. If None (the default) then automatically determine if the
        y-axis is log-scale.
    snap : bool, optional (default=True)
        If True (the default) then the origin is an implicit lower bound of the
        histogram unless the histogram has both positive and negative bins.
    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.
    kwargs : additional keyword arguments, optional
        All additional keyword arguments are passed to matplotlib's
        fill_between for the filled regions and matplotlib's step function
        for the edges.
    Returns
    -------
    The return value from matplotlib's hist function, or list of such return
    values if a stack or list of histograms was plotted.
    """
    if axes is None:
        axes = plt.gca()
    if logy is None:
        logy = axes.get_yscale() == 'log'
    # remember the pre-plot limits so they can be merged in _set_bounds
    curr_xlim = axes.get_xlim()
    curr_ylim = axes.get_ylim()
    was_empty = not axes.has_data()
    returns = []
    if isinstance(hists, _Hist):
        # This is a single plottable object.
        returns = _hist(hists, axes=axes, logy=logy, **kwargs)
        _set_bounds(hists, axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    elif stacked:
        # draw the top histogram first so its edges don't cover the histograms
        # beneath it in the stack
        if not reverse:
            hists = list(hists)[::-1]
        for i, h in enumerate(hists):
            # the bottom of each stacked histogram is the sum of everything
            # drawn after it (i.e. below it in the stack)
            if i == len(hists) - 1:
                low = h.Clone()
                low.Reset()
            else:
                low = sum(hists[i + 1:])
            high = h + low
            high.alpha = getattr(h, 'alpha', None)
            proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs)
            returns.append(proxy)
        if not reverse:
            # restore the caller's ordering of the return values
            returns = returns[::-1]
        _set_bounds(sum(hists), axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    else:
        # overlay: draw each histogram independently
        for h in _maybe_reversed(hists, reverse):
            returns.append(_hist(h, axes=axes, logy=logy, **kwargs))
        if reverse:
            returns = returns[::-1]
        _set_bounds(max(hists), axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    return returns
def _hist(h, axes=None, bottom=None, logy=None, zorder=None, **kwargs):
    """Draw a single histogram as a filled region plus a step outline.

    ``bottom`` is the histogram forming the lower boundary of the fill
    (used when stacking).  Returns a (legend proxy, step line) pair.
    """
    if axes is None:
        axes = plt.gca()
    if zorder is None:
        # draw on top of everything already on the axes
        zorder = _get_highest_zorder(axes) + 1
    _set_defaults(h, kwargs, ['common', 'line', 'fill'])
    # keep an untouched copy of the styling for the legend proxy below
    kwargs_proxy = kwargs.copy()
    fill = kwargs.pop('fill', False) or ('hatch' in kwargs)
    if fill:
        # draw the fill without the edge
        if bottom is None:
            # fill down to an empty clone of h (i.e. down to zero)
            bottom = h.Clone()
            bottom.Reset()
        fill_between(bottom, h, axes=axes, logy=logy, linewidth=0,
                     facecolor=kwargs['facecolor'],
                     edgecolor=kwargs['edgecolor'],
                     hatch=kwargs.get('hatch', None),
                     alpha=kwargs['alpha'],
                     zorder=zorder)
    # draw the edge
    s = step(h, axes=axes, logy=logy, label=None,
             zorder=zorder + 1, alpha=kwargs['alpha'],
             color=kwargs.get('color'))
    # draw the legend proxy
    if getattr(h, 'legendstyle', '').upper() == 'F':
        proxy = plt.Rectangle((0, 0), 0, 0, **kwargs_proxy)
        axes.add_patch(proxy)
    else:
        # be sure the linewidth is greater than zero...
        proxy = plt.Line2D((0, 0), (0, 0),
                           linestyle=kwargs_proxy['linestyle'],
                           linewidth=kwargs_proxy['linewidth'],
                           color=kwargs_proxy['edgecolor'],
                           alpha=kwargs['alpha'],
                           label=kwargs_proxy['label'])
        axes.add_line(proxy)
    return proxy, s[0]
def bar(hists,
        stacked=True,
        reverse=False,
        xerr=False, yerr=True,
        xpadding=0, ypadding=.1,
        yerror_in_padding=True,
        rwidth=0.8,
        snap=True,
        axes=None,
        **kwargs):
    """
    Make a matplotlib bar plot from a ROOT histogram, stack or
    list of histograms.
    Parameters
    ----------
    hists : Hist, list of Hist, HistStack
        The histogram(s) to be plotted
    stacked : bool or string, optional (default=True)
        If True then stack the histograms with the first histogram on the
        bottom, otherwise overlay them with the first histogram in the
        background. If 'cluster', then the bars will be arranged side-by-side.
    reverse : bool, optional (default=False)
        If True then reverse the order of the stack or overlay.
    xerr : bool, optional (default=False)
        If True, x error bars will be displayed.
    yerr : bool or string, optional (default=True)
        If False, no y errors are displayed. If True, an individual y
        error will be displayed for each hist in the stack. If 'linear' or
        'quadratic', a single error bar will be displayed with either the
        linear or quadratic sum of the individual errors.
    xpadding : float or 2-tuple of floats, optional (default=0)
        Padding to add on the left and right sides of the plot as a fraction of
        the axes width after the padding has been added. Specify unique left
        and right padding with a 2-tuple.
    ypadding : float or 2-tuple of floats, optional (default=.1)
        Padding to add on the top and bottom of the plot as a fraction of
        the axes height after the padding has been added. Specify unique top
        and bottom padding with a 2-tuple.
    yerror_in_padding : bool, optional (default=True)
        If True then make the padding inclusive of the y errors otherwise
        only pad around the y values.
    rwidth : float, optional (default=0.8)
        The relative width of the bars as a fraction of the bin width.
    snap : bool, optional (default=True)
        If True (the default) then the origin is an implicit lower bound of the
        histogram unless the histogram has both positive and negative bins.
    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.
    kwargs : additional keyword arguments, optional
        All additional keyword arguments are passed to matplotlib's bar
        function.
    Returns
    -------
    The return value from matplotlib's bar function, or list of such return
    values if a stack or list of histograms was plotted.
    """
    if axes is None:
        axes = plt.gca()
    # remember the pre-plot limits so they can be merged in _set_bounds
    curr_xlim = axes.get_xlim()
    curr_ylim = axes.get_ylim()
    was_empty = not axes.has_data()
    logy = kwargs.pop('log', axes.get_yscale() == 'log')
    kwargs['log'] = logy
    returns = []
    if isinstance(hists, _Hist):
        # This is a single histogram.
        returns = _bar(hists, xerr=xerr, yerr=yerr,
                       axes=axes, **kwargs)
        _set_bounds(hists, axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    elif stacked == 'cluster':
        # side-by-side bars: each histogram gets an equal share of the
        # rwidth fraction of every bin
        nhists = len(hists)
        hlist = _maybe_reversed(hists, reverse)
        for i, h in enumerate(hlist):
            width = rwidth / nhists
            offset = (1 - rwidth) / 2 + i * width
            returns.append(_bar(
                h, offset, width,
                xerr=xerr, yerr=yerr, axes=axes, **kwargs))
        _set_bounds(sum(hists), axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    elif stacked is True:
        nhists = len(hists)
        hlist = _maybe_reversed(hists, reverse)
        toterr = bottom = None
        # with 'linear'/'quadratic' a single combined error bar is drawn on
        # the topmost histogram of the stack
        if yerr == 'linear':
            toterr = [sum([h.GetBinError(i) for h in hists])
                      for i in range(1, hists[0].nbins(0) + 1)]
        elif yerr == 'quadratic':
            toterr = [sqrt(sum([h.GetBinError(i) ** 2 for h in hists]))
                      for i in range(1, hists[0].nbins(0) + 1)]
        for i, h in enumerate(hlist):
            err = None
            if yerr is True:
                err = True
            elif yerr and i == (nhists - 1):
                err = toterr
            returns.append(_bar(
                h,
                xerr=xerr, yerr=err,
                bottom=list(bottom.y()) if bottom else None,
                axes=axes, **kwargs))
            # accumulate the stack bottom for the next histogram
            if bottom is None:
                bottom = h.Clone()
            else:
                bottom += h
        _set_bounds(bottom, axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    else:
        # overlaid bars. BUGFIX: this branch previously raised a NameError
        # because ``hlist`` was only defined in the stacked branches.
        hlist = _maybe_reversed(hists, reverse)
        for h in hlist:
            returns.append(_bar(h, xerr=xerr, yerr=yerr,
                                axes=axes, **kwargs))
        if reverse:
            # restore the caller's ordering, matching hist()'s overlay mode
            returns = returns[::-1]
        _set_bounds(max(hists), axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap,
                    logy=logy)
    return returns
def _bar(h, roffset=0., rwidth=1., xerr=None, yerr=None, axes=None, **kwargs):
    """Draw one histogram with matplotlib's bar.

    ``roffset`` and ``rwidth`` position and size each bar as fractions of its
    bin width (used to cluster bars side-by-side).  Truthy ``xerr``/``yerr``
    are replaced by the histogram's own asymmetric errors.
    """
    if axes is None:
        axes = plt.gca()
    if xerr:
        # asymmetric errors: row 0 is the low side, row 1 the high side
        xerr = np.array([list(h.xerrl()), list(h.xerrh())])
    if yerr:
        yerr = np.array([list(h.yerrl()), list(h.yerrh())])
    _set_defaults(h, kwargs, ['common', 'line', 'fill', 'errors'])
    widths = [rwidth * binwidth for binwidth in h.xwidth()]
    lefts = [h.xedgesl(ibin) + roffset * h.xwidth(ibin)
             for ibin in range(1, h.nbins(0) + 1)]
    heights = list(h.y())
    return axes.bar(lefts, heights, width=widths, xerr=xerr, yerr=yerr, **kwargs)
def errorbar(hists,
             xerr=True, yerr=True,
             xpadding=0, ypadding=.1,
             xerror_in_padding=True,
             yerror_in_padding=True,
             emptybins=True,
             snap=True,
             axes=None,
             **kwargs):
    """
    Make a matplotlib errorbar plot from a ROOT histogram or graph
    or list of histograms and graphs.
    Parameters
    ----------
    hists : Hist, Graph or list of Hist and Graph
        The histogram(s) and/or Graph(s) to be plotted
    xerr : bool, optional (default=True)
        If True, x error bars will be displayed.
    yerr : bool or string, optional (default=True)
        If False, no y errors are displayed. If True, an individual y
        error will be displayed for each hist in the stack. If 'linear' or
        'quadratic', a single error bar will be displayed with either the
        linear or quadratic sum of the individual errors.
    xpadding : float or 2-tuple of floats, optional (default=0)
        Padding to add on the left and right sides of the plot as a fraction of
        the axes width after the padding has been added. Specify unique left
        and right padding with a 2-tuple.
    ypadding : float or 2-tuple of floats, optional (default=.1)
        Padding to add on the top and bottom of the plot as a fraction of
        the axes height after the padding has been added. Specify unique top
        and bottom padding with a 2-tuple.
    xerror_in_padding : bool, optional (default=True)
        If True then make the padding inclusive of the x errors otherwise
        only pad around the x values.
    yerror_in_padding : bool, optional (default=True)
        If True then make the padding inclusive of the y errors otherwise
        only pad around the y values.
    emptybins : bool, optional (default=True)
        If True (the default) then plot bins with zero content otherwise only
        show bins with nonzero content.
    snap : bool, optional (default=True)
        If True (the default) then the origin is an implicit lower bound of the
        histogram unless the histogram has both positive and negative bins.
    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.
    kwargs : additional keyword arguments, optional
        All additional keyword arguments are passed to matplotlib's errorbar
        function.
    Returns
    -------
    The return value from matplotlib's errorbar function, or list of such
    return values if a list of histograms and/or graphs was plotted.
    """
    if axes is None:
        axes = plt.gca()
    # remember the pre-plot limits so they can be merged in _set_bounds
    curr_xlim = axes.get_xlim()
    curr_ylim = axes.get_ylim()
    was_empty = not axes.has_data()
    if isinstance(hists, (_Hist, _Graph1DBase)):
        # This is a single plottable object.
        returns = _errorbar(
            hists, xerr, yerr,
            axes=axes, emptybins=emptybins, **kwargs)
        # BUGFIX: prev_xlim was previously not passed, so _set_bounds fell
        # back to the global pyplot x limits instead of this axes' limits
        # (consistent with hist() and bar() above).
        _set_bounds(hists, axes=axes,
                    was_empty=was_empty,
                    prev_xlim=curr_xlim,
                    prev_ylim=curr_ylim,
                    xpadding=xpadding, ypadding=ypadding,
                    xerror_in_padding=xerror_in_padding,
                    yerror_in_padding=yerror_in_padding,
                    snap=snap)
    else:
        # recurse on each plottable; bounds accumulate across the calls
        returns = []
        for h in hists:
            returns.append(errorbar(
                h, xerr=xerr, yerr=yerr, axes=axes,
                xpadding=xpadding, ypadding=ypadding,
                xerror_in_padding=xerror_in_padding,
                yerror_in_padding=yerror_in_padding,
                snap=snap,
                emptybins=emptybins,
                **kwargs))
    return returns
def _errorbar(h, xerr, yerr, axes=None, emptybins=True, zorder=None, **kwargs):
    """Draw a single histogram or graph with matplotlib's errorbar.

    Truthy ``xerr``/``yerr`` flags are replaced by the object's own
    asymmetric errors.  When ``emptybins`` is False, points with y == 0 are
    dropped together with their errors.
    """
    if axes is None:
        axes = plt.gca()
    if zorder is None:
        # draw on top of everything already on the axes
        zorder = _get_highest_zorder(axes) + 1
    _set_defaults(h, kwargs, ['common', 'errors', 'errorbar', 'marker'])
    if xerr:
        # asymmetric errors: row 0 is the low side, row 1 the high side
        xerr = np.array([list(h.xerrl()), list(h.xerrh())])
    if yerr:
        yerr = np.array([list(h.yerrl()), list(h.yerrh())])
    x = np.array(list(h.x()))
    y = np.array(list(h.y()))
    if not emptybins:
        # keep only points with nonzero content (and their error columns)
        nonempty = y != 0
        x = x[nonempty]
        y = y[nonempty]
        if xerr is not False and xerr is not None:
            xerr = xerr[:, nonempty]
        if yerr is not False and yerr is not None:
            yerr = yerr[:, nonempty]
    return axes.errorbar(x, y, xerr=xerr, yerr=yerr, zorder=zorder, **kwargs)
def step(h, logy=None, axes=None, **kwargs):
    """
    Make a matplotlib step plot from a ROOT histogram.
    Parameters
    ----------
    h : Hist
        A rootpy Hist
    logy : bool, optional (default=None)
        If True then clip the y range between 1E-300 and 1E300.
        If None (the default) then automatically determine if the axes are
        log-scale and if this clipping should be performed.
    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.
    kwargs : additional keyword arguments, optional
        Additional keyword arguments are passed directly to
        matplotlib's step function.
    Returns
    -------
    Returns the value from matplotlib's step function.
    """
    if axes is None:
        axes = plt.gca()
    if logy is None:
        logy = axes.get_yscale() == 'log'
    _set_defaults(h, kwargs, ['common', 'line'])
    if kwargs.get('color') is None:
        kwargs['color'] = h.GetLineColor('mpl')
    # append a trailing zero so the last bin's right edge is drawn down
    y = np.array(list(h.y()) + [0.])
    if logy:
        # keep log-scale axes from choking on zero/negative values
        np.clip(y, 1E-300, 1E300, out=y)
    return axes.step(list(h.xedges()), y, where='post', **kwargs)
def fill_between(a, b, logy=None, axes=None, **kwargs):
"""
Fill the region between two histograms or graphs.
Parameters
----------
a : Hist
A rootpy Hist
b : Hist
A rootpy Hist
logy : bool, optional (default=None)
If True then clip the region between 1E-300 and 1E300.
If None (the default) then automatically determine if the axes are
log-scale and if this clipping should be performed.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's fill_between function.
Returns
-------
Returns the value from matplotlib's fill_between function.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
if not isinstance(a, _Hist) or not isinstance(b, _Hist):
raise TypeError(
"fill_between only operates on 1D histograms")
a.check_compatibility(b, check_edges=True)
x = []
top = []
bottom = []
for abin, bbin in zip(a.bins(overflow=False), b.bins(overflow=False)):
up = max(abin.value, bbin.value)
dn = min(abin.value, bbin.value)
x.extend([abin.x.low, abin.x.high])
top.extend([up, up])
bottom.extend([dn, dn])
x = np.array(x)
top = np.array(top)
bottom = np.array(bottom)
if logy:
np.clip(top, 1E-300, 1E300, out=top)
np.clip(bottom, 1E-300, 1E300, out=bottom)
return axes.fill_between(x, top, bottom, **kwargs)
def hist2d(h, axes=None, colorbar=False, **kwargs):
"""
Draw a 2D matplotlib histogram plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
colorbar : Boolean, optional (default=False)
If True, include a colorbar in the produced plot
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's hist2d function.
Returns
-------
Returns the value from matplotlib's hist2d function.
"""
if axes is None:
axes = plt.gca()
X, Y = np.meshgrid(list(h.x()), list(h.y()))
x = X.ravel()
y = Y.ravel()
z = np.array(h.z()).T
# returns of hist2d: (counts, xedges, yedges, Image)
return_values = axes.hist2d(x, y, weights=z.ravel(),
bins=(list(h.xedges()), list(h.yedges())),
**kwargs)
if colorbar:
mappable = return_values[-1]
plt.colorbar(mappable, ax=axes)
return return_values
def imshow(h, axes=None, colorbar=False, **kwargs):
"""
Draw a matplotlib imshow plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
colorbar : Boolean, optional (default=False)
If True, include a colorbar in the produced plot
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's imshow function.
Returns
-------
Returns the value from matplotlib's imshow function.
"""
kwargs.setdefault('aspect', 'auto')
if axes is None:
axes = plt.gca()
z = np.array(h.z()).T
axis_image= axes.imshow(
z,
extent=[
h.xedges(1), h.xedges(h.nbins(0) + 1),
h.yedges(1), h.yedges(h.nbins(1) + 1)],
interpolation='nearest',
origin='lower',
**kwargs)
if colorbar:
plt.colorbar(axis_image, ax=axes)
return axis_image
def contour(h, axes=None, zoom=None, label_contour=False, **kwargs):
"""
Draw a matplotlib contour plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
zoom : float or sequence, optional (default=None)
The zoom factor along the axes. If a float, zoom is the same for each
axis. If a sequence, zoom should contain one value for each axis.
The histogram is zoomed using a cubic spline interpolation to create
smooth contours.
label_contour : Boolean, optional (default=False)
If True, labels are printed on the contour lines.
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's contour function.
Returns
-------
Returns the value from matplotlib's contour function.
"""
if axes is None:
axes = plt.gca()
x = np.array(list(h.x()))
y = np.array(list(h.y()))
z = np.array(h.z()).T
if zoom is not None:
from scipy import ndimage
if hasattr(zoom, '__iter__'):
zoom = list(zoom)
x = ndimage.zoom(x, zoom[0])
y = ndimage.zoom(y, zoom[1])
else:
x = ndimage.zoom(x, zoom)
y = ndimage.zoom(y, zoom)
z = ndimage.zoom(z, zoom)
return_values = axes.contour(x, y, z, **kwargs)
if label_contour:
plt.clabel(return_values)
return return_values
| gpl-3.0 |
rseubert/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 11 | 3064 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
"""Test FactorAnalysis ability to recover the data covariance structure
"""
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood dif not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
hammerlab/mhcflurry | test/test_class1_pan.py | 1 | 3833 | """
Tests for training and predicting using Class1 pan-allele models.
"""
import logging
logging.getLogger('tensorflow').disabled = True
logging.getLogger('matplotlib').disabled = True
from sklearn.metrics import roc_auc_score
import pandas
from numpy.testing import assert_, assert_equal
from mhcflurry import Class1AffinityPredictor, Class1NeuralNetwork
from mhcflurry.allele_encoding import AlleleEncoding
from mhcflurry.downloads import get_path
from mhcflurry.testing_utils import cleanup, startup
teardown = cleanup
setup = startup
HYPERPARAMETERS = {
'activation': 'tanh',
'allele_dense_layer_sizes': [],
'batch_normalization': False,
'dense_layer_l1_regularization': 0.0,
'dense_layer_l2_regularization': 0.0,
'dropout_probability': 0.5,
'early_stopping': True,
'init': 'glorot_uniform',
'layer_sizes': [64],
'learning_rate': None,
'locally_connected_layers': [],
'loss': 'custom:mse_with_inequalities',
'max_epochs': 5000,
'minibatch_size': 256,
'optimizer': 'rmsprop',
'output_activation': 'sigmoid',
'patience': 5,
'peptide_allele_merge_activation': '',
'peptide_allele_merge_method': 'concatenate',
'peptide_amino_acid_encoding': 'BLOSUM62',
'peptide_dense_layer_sizes': [],
'peptide_encoding': {
'alignment_method': 'left_pad_centered_right_pad',
'max_length': 15,
'vector_encoding_name': 'BLOSUM62',
},
'random_negative_affinity_max': 50000.0,
'random_negative_affinity_min': 20000.0,
'random_negative_constant': 25,
'random_negative_distribution_smoothing': 0.0,
'random_negative_match_distribution': True,
'random_negative_rate': 0.2,
'random_negative_method': 'by_allele',
'train_data': {},
'validation_split': 0.1,
}
ALLELE_TO_SEQUENCE = pandas.read_csv(
get_path(
"allele_sequences", "allele_sequences.csv"),
index_col=0).sequence.to_dict()
TRAIN_DF = pandas.read_csv(
get_path(
"data_curated", "curated_training_data.affinity.csv.bz2"))
TRAIN_DF = TRAIN_DF.loc[TRAIN_DF.allele.isin(ALLELE_TO_SEQUENCE)]
TRAIN_DF = TRAIN_DF.loc[TRAIN_DF.peptide.str.len() >= 8]
TRAIN_DF = TRAIN_DF.loc[TRAIN_DF.peptide.str.len() <= 15]
TRAIN_DF = TRAIN_DF.loc[
TRAIN_DF.allele.isin(TRAIN_DF.allele.value_counts().iloc[:3].index)
]
MS_HITS_DF = pandas.read_csv(
get_path(
"data_curated", "curated_training_data.csv.bz2"))
MS_HITS_DF = MS_HITS_DF.loc[MS_HITS_DF.allele.isin(TRAIN_DF.allele.unique())]
MS_HITS_DF = MS_HITS_DF.loc[MS_HITS_DF.peptide.str.len() >= 8]
MS_HITS_DF = MS_HITS_DF.loc[MS_HITS_DF.peptide.str.len() <= 15]
MS_HITS_DF = MS_HITS_DF.loc[~MS_HITS_DF.peptide.isin(TRAIN_DF.peptide)]
print("Loaded %d training and %d ms hits" % (
len(TRAIN_DF), len(MS_HITS_DF)))
def test_train_simple():
network = Class1NeuralNetwork(**HYPERPARAMETERS)
allele_encoding = AlleleEncoding(
TRAIN_DF.allele.values,
allele_to_sequence=ALLELE_TO_SEQUENCE)
network.fit(
TRAIN_DF.peptide.values,
affinities=TRAIN_DF.measurement_value.values,
allele_encoding=allele_encoding,
inequalities=TRAIN_DF.measurement_inequality.values)
validation_df = MS_HITS_DF.copy()
validation_df["hit"] = 1
decoys_df = MS_HITS_DF.copy()
decoys_df["hit"] = 0
decoys_df["allele"] = decoys_df.allele.sample(frac=1.0).values
validation_df = pandas.concat([validation_df, decoys_df], ignore_index=True)
predictions = network.predict(
peptides=validation_df.peptide.values,
allele_encoding=AlleleEncoding(
validation_df.allele.values, borrow_from=allele_encoding))
print(pandas.Series(predictions).describe())
score = roc_auc_score(validation_df.hit, -1 * predictions)
print("AUC", score)
assert_(score > 0.6)
| apache-2.0 |
arijitt/HiBench | bin/report_gen_plot.py | 22 | 5011 | #!/usr/bin/env python
#coding: utf-8
import sys, os, re
from pprint import pprint
from collections import defaultdict, namedtuple
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
RecordRaw=namedtuple("RecordRaw", "type durtation data_size throughput_total throughput_per_node")
Record=namedtuple("Record", "type language durtation data_size throughput_total throughput_per_node")
def human_readable_size(n):
"convert number into human readable string"
if n<1000: return str(n)
if n<800000: return "%.3fK" % (n/1000.0)
if n<800000000: return "%.3fM" % (n/1000000.0)
if n<800000000000: return "%.3fG" % (n/1000000000.0)
return "%.3fT" % (n/1000000000000.0)
def group_by_type(datas):
groups = defaultdict(dict)
for i in datas:
words = re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', i.type).split()
prefix = words[0].lower()
suffix = "_".join([x.lower() for x in words[1:]])
groups[suffix][prefix] = Record(type = "".join(words[1:]),
language = prefix,
durtation = i.durtation,
data_size = i.data_size,
throughput_total = i.throughput_total,
throughput_per_node = i.throughput_per_node
)
return dict(groups)
def report_plot(fn):
if not os.path.isfile(fn):
print "Failed to find `sparkbench.report`"
sys.exit(1)
with open(fn) as f:
data = [x.split() for x in f.readlines()[1:] if x.strip() and not x.strip().startswith('#')]
pprint(data, width=300)
groups = group_by_type([RecordRaw(type = x[0],
data_size = int(x[3]),
durtation = float(x[4]),
throughput_total = int(x[5]) / 1024.0 / 1024,
throughput_per_node = int(x[6]) / 1024.0 /1024
) for x in data])
#print groups
base_dir = os.path.dirname(fn)
plot(groups, "Seconds of durtations (Less is better)", "Seconds", "durtation", os.path.join(base_dir, "durtation.png"))
# plot(groups, "Throughput in total (Higher is better)", "MB/s", "throughput_total", os.path.join(base_dir, "throughput_total.png"))
# plot(groups, "Throughput per node (Higher is better)", "MB/s", "throughput_per_node", os.path.join(base_dir, "throughput_per_node.png"))
def plot(groups, title="Seconds of durtations", ylabel="Seconds", value_field="durtation", fig_fn = "foo.png"):
# plot it
keys = groups.keys()
languages = sorted(reduce(lambda x,y: x.union(y), [set([groups[x][y].language for y in groups[x]]) for x in groups]))
width = 0.15
rects = []
fig = plt.figure()
ax = plt.axes()
colors='rgbcymw'
# NCURVES=10
# curves = [np.random.random(20) for i in range(NCURVES)]
# values = range(NCURVES)
# jet = colors.Colormap('jet')
# cNorm = colors.Normalize(vmin=0, vmax=values[-1])
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
patterns = ('-', '+', 'x', '\\', '/', '*', '.', 'O')
for idx, lang in enumerate(languages):
rects.append(ax.bar([x + width * (idx + 1) for x in range(len(keys))], # x index
[getattr(groups[x][lang], value_field) if x in groups and groups[x].has_key(lang) else 0 for x in keys], # value
width,
color = colors[idx],
hatch = patterns[idx]
) # width
)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(24)
ax.set_ylabel(ylabel, fontname="Arial", size="32")
ax.set_title(title, fontname="Arial", size="44")
x_axis_offset = len(languages)* width /2.0
ax.set_xticks([(x + width + x_axis_offset) for x in range(len(keys))])
ax.set_xticklabels(["%s \n@%s" % (x, human_readable_size(groups[x].values()[0].data_size)) for x in keys])
ax.grid(True)
ax.legend([x[0] for x in rects],
languages)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d' % int(height),
ha='center', va='bottom')
# [autolabel(x) for x in rects]
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
plt.savefig(fig_fn, dpi=100)
if __name__ == "__main__":
try:
default_report_fn = sys.argv[1]
except:
default_report_fn = os.path.join(os.path.dirname(__file__), "..", "sparkbench.report")
report_plot(default_report_fn)
| apache-2.0 |
RomainBrault/scikit-learn | sklearn/neural_network/tests/test_stochastic_optimizers.py | 146 | 4310 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
abonaca/gary | gary/dynamics/tests/helpers.py | 1 | 4730 | # coding: utf-8
""" Test helpers """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import sys
# Third-party
import astropy.coordinates as coord
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
# Project
from ..actionangle import classify_orbit
from ...units import galactic
from ...coordinates import physicsspherical_to_cartesian
from ...potential import HarmonicOscillatorPotential, IsochronePotential
# HACK:
if "/Users/adrian/projects/genfunc" not in sys.path:
sys.path.append("/Users/adrian/projects/genfunc")
import genfunc_3d
def sanders_nvecs(N_max, dx, dy, dz):
from itertools import product
NNx = range(-N_max, N_max+1, dx)
NNy = range(-N_max, N_max+1, dy)
NNz = range(-N_max, N_max+1, dz)
n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz)
if(not(i==0 and j==0 and k==0) # exclude zero vector
and (k>0 # northern hemisphere
or (k==0 and j>0) # half of x-y plane
or (k==0 and j==0 and i>0)) # half of x axis
and np.sqrt(i*i+j*j+k*k)<=N_max)]) # inside sphere
return n_vectors
def sanders_act_ang_freq(t, w, N_max=6):
w2 = w.copy()
loop = classify_orbit(w)
if np.any(loop):
w2[:,3:] = (w2[:,3:]*u.kpc/u.Myr).to(u.km/u.s).value
(act,ang,n_vec,toy_aa,pars),loop2 = genfunc_3d.find_actions(w2, t/1000.,
N_matrix=N_max, ifloop=True)
else:
(act,ang,n_vec,toy_aa,pars),loop2 = genfunc_3d.find_actions(w2, t,
N_matrix=N_max, ifloop=True)
actions = act[:3]
angles = ang[:3]
freqs = ang[3:6]
if np.any(loop):
toy_potential = IsochronePotential(m=pars[0]*1E11, b=pars[1], units=galactic)
actions = (actions*u.kpc*u.km/u.s).to(u.kpc**2/u.Myr).value
freqs = (freqs/u.Gyr).to(1/u.Myr).value
else:
toy_potential = HarmonicOscillatorPotential(omega=np.array(pars))
return actions,angles,freqs,toy_potential
def _crazy_angle_loop(theta1,theta2,ax):
cnt = 0
ix1 = 0
while True:
cnt += 1
for ix2 in range(ix1,ix1+1000):
if ix2 > len(theta1)-1:
ix2 = len(theta1)-1
break
if theta1[ix2] < theta1[ix1] or theta2[ix2] < theta2[ix1]:
ix2 -= 1
break
if theta1[ix2] != theta1[ix1:ix2+1].max() or theta2[ix2] != theta2[ix1:ix2+1].max():
ix1 = ix2+1
continue
if cnt > 100 or ix2 == len(theta1)-1:
break
if ix1 == ix2:
ix1 = ix2+1
continue
ax.plot(theta1[ix1:ix2+1], theta2[ix1:ix2+1], alpha=0.5, marker='o', c='k')
ix1 = ix2+1
def plot_angles(t, angles, freqs, subsample_factor=1000):
theta = (angles[:,None] + freqs[:,None]*t[np.newaxis])
subsample = theta.shape[1]//subsample_factor
# subsample = 1
theta = (theta[:,::subsample] / np.pi) % 2.
fig,axes = plt.subplots(1,2,sharex=True,sharey=True,figsize=(10,5))
_crazy_angle_loop(theta[0], theta[1], axes[0])
_crazy_angle_loop(theta[0], theta[2], axes[1])
axes[0].set_xlim(0,2)
axes[0].set_ylim(0,2)
return fig
# axes[1].scatter(theta[0,ix], theta[2], alpha=0.5, marker='o', c=t)
def isotropic_w0(N=100):
# positions
d = np.random.lognormal(mean=np.log(25), sigma=0.5, size=N)
phi = np.random.uniform(0, 2*np.pi, size=N)
theta = np.arccos(np.random.uniform(size=N) - 0.5)
vr = np.random.normal(150., 40., size=N)*u.km/u.s
vt = np.random.normal(100., 40., size=N)
vt = np.vstack((vt,np.zeros_like(vt))).T
# rotate to be random position angle
pa = np.random.uniform(0, 2*np.pi, size=N)
M = np.array([[np.cos(pa), -np.sin(pa)],[np.sin(pa), np.cos(pa)]]).T
vt = np.array([vv.dot(MM) for (vv,MM) in zip(vt,M)])*u.km/u.s
vphi,vtheta = vt.T
rep = coord.PhysicsSphericalRepresentation(r=d*u.dimensionless_unscaled,
phi=phi*u.radian,
theta=theta*u.radian)
x = rep.represent_as(coord.CartesianRepresentation).xyz.T.value
vr = vr.decompose(galactic).value
vphi = vphi.decompose(galactic).value
vtheta = vtheta.decompose(galactic).value
v = physicsspherical_to_cartesian(rep, [vr,vphi,vtheta]*u.dimensionless_unscaled).T.value
return np.hstack((x,v))
| mit |
xyguo/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Bimodal sample: 30% of the points from N(0, 1) and 70% from N(5, 1).
# The ``size`` argument of np.random.normal must be an int; the original
# passed the float ``0.3 * N``, which modern NumPy rejects.
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)

fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1: one choice of bin placement
# NOTE(review): ``normed`` was removed in matplotlib 3.1; on modern
# matplotlib this must become ``density=True`` — kept as-is for the old
# matplotlib this example targets.
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2: same data, bin edges shifted by 0.75
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")

# tophat KDE: equivalent to centering one "block" on each sample
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")

# Gaussian KDE: smooth generalization of the block idea
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")

# mark sample positions and apply common limits/labels to all four panels
for axi in ax.ravel():
    axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
    axi.set_xlim(-4, 9)
    axi.set_ylim(-0.02, 0.34)

for axi in ax[:, 0]:
    axi.set_ylabel('Normalized Density')

for axi in ax[1, :]:
    axi.set_xlabel('x')

#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))

fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
    """Tick formatter: 0 -> '0', +/-1 -> 'h'/'-h', otherwise '<n>h'."""
    special = {0: '0', 1: 'h', -1: '-h'}
    if x in special:
        return special[x]
    return '%ih' % x
# draw each kernel's shape around a single source point at the origin
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
                            'exponential', 'linear', 'cosine']):
    axi = ax.ravel()[i]
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
    axi.text(-2.6, 0.95, kernel)

    axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())

    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)

ax[0, 1].set_title('Available Kernels')

#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
# The ``size`` argument must be an int; the original passed the float
# ``0.3 * N``, which modern NumPy rejects.
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

# exact mixture density, for visual comparison against the estimates
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
             + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))

fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
        label='input distribution')

for kernel in ['gaussian', 'tophat', 'epanechnikov']:
    kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X_plot)
    ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
            label="kernel = '{0}'".format(kernel))

ax.text(6, 0.38, "N={0} points".format(N))

ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 16 | 8439 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.fixes import euler_gamma
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.model_selection import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.ensemble.iforest import _average_path_length
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
# Shared module-level RNG: makes the dataset permutations below reproducible.
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
    """Smoke-test IsolationForest across a small grid of parameter settings."""
    train = np.array([[0, 1], [1, 2]])
    test = np.array([[2, 1], [1, 1]])
    param_grid = ParameterGrid({"n_estimators": [3],
                                "max_samples": [0.5, 1.0, 3],
                                "bootstrap": [True, False]})
    with ignore_warnings():
        for kwargs in param_grid:
            forest = IsolationForest(random_state=rng, **kwargs)
            forest.fit(train).predict(test)
def test_iforest_sparse():
    """Check IForest for various parameter settings on sparse input.

    Sparse (CSC/CSR) and dense training/prediction must produce identical
    results for every parameter combination.
    """
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "bootstrap": [True, False]})

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        for params in grid:
            # Trained on sparse format
            sparse_classifier = IsolationForest(
                n_estimators=10, random_state=1, **params).fit(X_train_sparse)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_classifier = IsolationForest(
                n_estimators=10, random_state=1, **params).fit(X_train)
            dense_results = dense_classifier.predict(X_test)
            # Sparse and dense input must yield identical predictions.
            # (The original repeated this assertion twice; once suffices.)
            assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
    """Check that deficient inputs raise the proper exceptions/warnings."""
    X = iris.data

    # Out-of-range numeric max_samples values must raise.
    for bad in (-1, 0.0, 2.0):
        assert_raises(ValueError, IsolationForest(max_samples=bad).fit, X)

    # The dataset has less than 256 samples, explicitly setting
    # max_samples > n_samples should result in a warning. If not set
    # explicitly there should be no warning.
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         IsolationForest(max_samples=1000).fit, X)
    assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
    assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)

    # Non-numeric strings and fractions above 1.0 must raise too.
    for bad in ('foobar', 1.5):
        assert_raises(ValueError, IsolationForest(max_samples=bad).fit, X)
def test_recalculate_max_depth():
    """Check max_depth recalculation when max_samples is reset to n_samples."""
    X = iris.data
    forest = IsolationForest().fit(X)
    # Every tree must use ceil(log2(n_samples)) as its depth limit.
    expected_depth = int(np.ceil(np.log2(X.shape[0])))
    for tree in forest.estimators_:
        assert_equal(tree.max_depth, expected_depth)
def test_max_samples_attribute():
    """max_samples_ reflects the validated number of samples actually used."""
    X = iris.data

    # Default: all samples are used.
    assert_equal(IsolationForest().fit(X).max_samples_, X.shape[0])

    # Requesting more samples than available warns, then falls back to
    # n_samples.
    clf = IsolationForest(max_samples=500)
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         clf.fit, X)
    assert_equal(clf.max_samples_, X.shape[0])

    # A float max_samples is interpreted as a fraction of n_samples.
    assert_equal(IsolationForest(max_samples=0.4).fit(X).max_samples_,
                 0.4 * X.shape[0])
def test_iforest_parallel_regression():
    """Check parallel regression: predictions must not depend on n_jobs."""
    local_rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=local_rng)

    ensemble = IsolationForest(n_jobs=3, random_state=0).fit(X_train)

    ensemble.set_params(n_jobs=1)
    pred_one_job = ensemble.predict(X_test)

    ensemble.set_params(n_jobs=2)
    pred_two_jobs = ensemble.predict(X_test)
    assert_array_almost_equal(pred_one_job, pred_two_jobs)

    # Re-fitting with a single job and the same seed must also agree.
    ensemble = IsolationForest(n_jobs=1, random_state=0).fit(X_train)
    pred_refit = ensemble.predict(X_test)
    assert_array_almost_equal(pred_one_job, pred_refit)
def test_iforest_performance():
    """Test Isolation Forest performs well on a simple outlier task."""
    # Generate train/test data: 120 normal points, first 100 for training.
    rng = check_random_state(2)
    X = 0.3 * rng.randn(120, 2)
    # NOTE: the original also computed ``np.r_[X + 2, X - 2]`` here, but the
    # value was immediately overwritten; the dead assignment is removed.
    X_train = X[:100]

    # Generate some abnormal novel observations
    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
    X_test = np.r_[X[100:], X_outliers]
    y_test = np.array([0] * 20 + [1] * 20)

    # fit the model
    clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)

    # Anomaly score: decision_function is high for inliers, so negate it to
    # obtain a score that is high for outliers.
    y_pred = - clf.decision_function(X_test)

    # check that outliers are ranked almost perfectly above inliers
    assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
    """Outliers in a toy sample must score higher and be predicted as -1."""
    # toy sample (the last two samples are outliers)
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
    # Test IsolationForest (original comment said "LOF" — a copy/paste slip)
    clf = IsolationForest(random_state=rng, contamination=0.25)
    clf.fit(X)
    # negate so that larger values mean "more abnormal"
    decision_func = - clf.decision_function(X)
    pred = clf.predict(X)
    # assert detect outliers:
    assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
    assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_max_samples_consistency():
    """Validated max_samples in iforest and BaseBagging must be identical."""
    forest = IsolationForest().fit(iris.data)
    assert_equal(forest.max_samples_, forest._max_samples)
def test_iforest_subsampled_features():
    """Non-regression test for #5732: predict used to fail with max_features < 1."""
    local_rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=local_rng)
    forest = IsolationForest(max_features=0.8)
    forest.fit(X_train, y_train)
    forest.predict(X_test)
def test_iforest_average_path_length():
    """Non-regression test for #8549 (wrong integer-case path-length formula)."""
    # Closed-form expected values: 2*(ln(n-1) + gamma) - 2*(n-1)/n.
    expected_5 = 2. * (np.log(4.) + euler_gamma) - 2. * 4. / 5.
    expected_999 = 2. * (np.log(998.) + euler_gamma) - 2. * 998. / 999.

    assert_almost_equal(_average_path_length(1), 1., decimal=10)
    assert_almost_equal(_average_path_length(5), expected_5, decimal=10)
    assert_almost_equal(_average_path_length(999), expected_999, decimal=10)
    # The vectorized form must match the scalar results element-wise.
    assert_array_almost_equal(_average_path_length(np.array([1, 5, 999])),
                              [1., expected_5, expected_999], decimal=10)
| bsd-3-clause |
jslhs/clFFT | src/scripts/perf/plotPerformance.py | 2 | 11532 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib
import pylab
from matplotlib.backends.backend_pdf import PdfPages
from fftPerformanceTesting import *
def plotGraph(dataForAllPlots, title, plottype, plotkwargs, xaxislabel, yaxislabel):
    """Draw one line per entry of dataForAllPlots and show or save the figure.

    Uses the module-level ``args`` namespace for the output file name.
    """
    # NOTE(review): ``dh`` is presumably provided by the star import of
    # fftPerformanceTesting — verify it is defined there.
    dh.write('Making graph\n')
    line_colors = ['k', 'y', 'm', 'c', 'r', 'b', 'g']
    for series in dataForAllPlots:
        # plottype selects the pylab plotting routine ('plot', 'semilogx', ...)
        plot_fn = getattr(pylab, plottype)
        plot_fn(series.xdata, series.ydata,
                '{}.-'.format(line_colors.pop()),
                label=series.label, **plotkwargs)
    if len(dataForAllPlots) > 1:
        pylab.legend(loc='best')
    pylab.title(title)
    pylab.xlabel(xaxislabel)
    pylab.ylabel(yaxislabel)
    pylab.grid(True)
    if args.outputFilename == None:
        # no output file requested: display on screen
        pylab.show()
    else:
        # otherwise write the figure to the requested file
        pylab.savefig(args.outputFilename, dpi=(1024/8))
######## plotFromDataFile() Function to plot from data file begins ########
def plotFromDataFile():
data = []
"""
read in table(s) from file(s)
"""
for thisFile in args.datafile:
if not os.path.isfile(thisFile):
print 'No file with the name \'{}\' exists. Please indicate another filename.'.format(thisFile)
quit()
results = open(thisFile, 'r')
resultsContents = results.read()
resultsContents = resultsContents.rstrip().split('\n')
firstRow = resultsContents.pop(0)
if firstRow != tableHeader:
print 'ERROR: input file \'{}\' does not match expected format.'.format(thisFile)
quit()
for row in resultsContents:
row = row.split(',')
row = TableRow(TestCombination(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]), row[10])
data.append(GraphPoint(row.parameters.x, row.parameters.y, row.parameters.z, row.parameters.batchsize, row.parameters.precision, row.parameters.device, row.parameters.label, row.gflops))
"""
data sanity check
"""
# if multiple plotvalues have > 1 value among the data rows, the user must specify which to plot
multiplePlotValues = []
for option in plotvalues:
values = []
for point in data:
values.append(getattr(point, option))
multiplePlotValues.append(len(set(values)) > 1)
if multiplePlotValues.count(True) > 1 and args.plot == None:
print 'ERROR: more than one parameter of {} has multiple values. Please specify which parameter to plot with --plot'.format(plotvalues)
quit()
# if args.graphxaxis is not 'problemsize', the user should know that the results might be strange
if args.graphxaxis != 'problemsize':
xaxisvalueSet = []
for option in xaxisvalues:
if option != 'problemsize':
values = []
for point in data:
values.append(getattr(point, option))
xaxisvalueSet.append(len(set(values)) > 1)
if xaxisvalueSet.count(True) > 1:
print 'WARNING: more than one parameter of {} is varied. unexpected results may occur. please double check your graphs for accuracy.'.format(xaxisvalues)
# multiple rows should not have the same input values
pointInputs = []
for point in data:
pointInputs.append(point.__str__().split(';')[0])
if len(set(pointInputs)) != len(data):
print 'ERROR: imported table has duplicate rows with identical input parameters'
quit()
"""
figure out if we have multiple plots on this graph (and what they should be)
"""
if args.plot != None:
multiplePlots = args.plot
elif multiplePlotValues.count(True) == 1:
multiplePlots = plotvalues[multiplePlotValues.index(True)]
else:
# default to device if none of the options to plot have multiple values
multiplePlots = 'device'
"""
assemble data for the graphs
"""
data.sort(key=lambda row: int(getattr(row, args.graphxaxis)))
# choose scale for x axis
if args.xaxisscale == None:
# user didn't specify. autodetect
if int(getattr(data[len(data)-1], args.graphxaxis)) > 2000: # big numbers on x-axis
args.xaxisscale = 'log2'
elif int(getattr(data[len(data)-1], args.graphxaxis)) > 10000: # bigger numbers on x-axis
args.xaxisscale = 'log10'
else: # small numbers on x-axis
args.xaxisscale = 'linear'
if args.xaxisscale == 'linear':
plotkwargs = {}
plottype = 'plot'
elif args.xaxisscale == 'log2':
plottype = 'semilogx'
plotkwargs = {'basex':2}
elif args.xaxisscale == 'log10':
plottype = 'semilogx'
plotkwargs = {'basex':10}
else:
print 'ERROR: invalid value for x-axis scale'
quit()
plots = set(getattr(row, multiplePlots) for row in data)
class DataForOnePlot:
def __init__(self, inlabel, inxdata, inydata):
self.label = inlabel
self.xdata = inxdata
self.ydata = inydata
dataForAllPlots = []
for plot in plots:
dataForThisPlot = itertools.ifilter( lambda x: getattr(x, multiplePlots) == plot, data)
dataForThisPlot = list(itertools.islice(dataForThisPlot, None))
if args.graphxaxis == 'problemsize':
xdata = [int(row.x) * int(row.y) * int(row.z) * int(row.batchsize) for row in dataForThisPlot]
else:
xdata = [getattr(row, args.graphxaxis) for row in dataForThisPlot]
ydata = [getattr(row, args.graphyaxis) for row in dataForThisPlot]
dataForAllPlots.append(DataForOnePlot(plot,xdata,ydata))
"""
assemble labels for the graph or use the user-specified ones
"""
if args.graphtitle:
# use the user selection
title = args.graphtitle
else:
# autogen a lovely title
title = 'Performance vs. ' + args.graphxaxis.capitalize()
if args.xaxislabel:
# use the user selection
xaxislabel = args.xaxislabel
else:
# autogen a lovely x-axis label
if args.graphxaxis == 'cachesize':
units = '(bytes)'
else:
units = '(datapoints)'
xaxislabel = args.graphxaxis + ' ' + units
if args.yaxislabel:
# use the user selection
yaxislabel = args.yaxislabel
else:
# autogen a lovely y-axis label
if args.graphyaxis == 'gflops':
units = 'GFLOPS'
yaxislabel = 'Performance (' + units + ')'
"""
display a pretty graph
"""
colors = ['k','y','m','c','r','b','g']
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata, '{}.-'.format(colors.pop()), label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
pylab.savefig(args.outputFilename,dpi=(1024/8))
######### plotFromDataFile() Function to plot from data file ends #########
######## "main" program begins #####
"""
define and parse parameters
"""
xaxisvalues = ['x','y','z','batchsize','problemsize']
yaxisvalues = ['gflops']
plotvalues = ['device', 'precision', 'label']
parser = argparse.ArgumentParser(description='Plot performance of the clfft\
library. clfft.plotPerformance.py reads in data tables from clfft.\
measurePerformance.py and plots their values')
fileOrDb = parser.add_mutually_exclusive_group(required=True)
fileOrDb.add_argument('-d', '--datafile',
dest='datafile', action='append', default=None, required=False,
help='indicate a file to use as input. must be in the format output by\
clfft.measurePerformance.py. may be used multiple times to indicate\
multiple input files. e.g., -d cypressOutput.txt -d caymanOutput.txt')
parser.add_argument('-x', '--x_axis',
dest='graphxaxis', default=None, choices=xaxisvalues, required=True,
help='indicate which value will be represented on the x axis. problemsize\
is defined as x*y*z*batchsize')
parser.add_argument('-y', '--y_axis',
dest='graphyaxis', default='gflops', choices=yaxisvalues,
help='indicate which value will be represented on the y axis')
parser.add_argument('--plot',
dest='plot', default=None, choices=plotvalues,
help='indicate which of {} should be used to differentiate multiple plots.\
this will be chosen automatically if not specified'.format(plotvalues))
parser.add_argument('--title',
dest='graphtitle', default=None,
help='the desired title for the graph generated by this execution. if\
GRAPHTITLE contains any spaces, it must be entered in \"double quotes\".\
if this option is not specified, the title will be autogenerated')
parser.add_argument('--x_axis_label',
dest='xaxislabel', default=None,
help='the desired label for the graph\'s x-axis. if XAXISLABEL contains\
any spaces, it must be entered in \"double quotes\". if this option\
is not specified, the x-axis label will be autogenerated')
parser.add_argument('--x_axis_scale',
dest='xaxisscale', default=None, choices=['linear','log2','log10'],
help='the desired scale for the graph\'s x-axis. if nothing is specified,\
it will be selected automatically')
parser.add_argument('--y_axis_label',
dest='yaxislabel', default=None,
help='the desired label for the graph\'s y-axis. if YAXISLABEL contains any\
spaces, it must be entered in \"double quotes\". if this option is not\
specified, the y-axis label will be autogenerated')
parser.add_argument('--outputfile',
dest='outputFilename', default=None,
help='name of the file to output graphs. Supported formats: emf, eps, pdf, png, ps, raw, rgba, svg, svgz.')
args = parser.parse_args()
if args.datafile != None:
plotFromDataFile()
else:
print "Atleast specify if you want to use text files or database for plotting graphs. Use -h or --help option for more details"
quit()
| apache-2.0 |
hypergravity/hrs | song/measure_xdrift.py | 1 | 4877 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 23:10:05 2016
@author: cham
"""
import sys
import ccdproc
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column
from matplotlib import rcParams
from tqdm import trange
from hrs import thar_corr2d
from .utils import scan_files
rcParams.update({"font.size": 20})
def measure_xshift_2dcorr(imfps, bias_temp, flat_temp, xmax=12):
    """Measure the X drift of each image via 2-D cross-correlation.

    Parameters
    ----------
    imfps : sequence of str
        Paths of the images to measure.
    bias_temp : ccdproc.CCDData
        Bias template subtracted from each image before correlating.
    flat_temp : ccdproc.CCDData
        Template image (flat or star) the frames are correlated against.
    xmax : int
        Maximum X shift, in pixels, searched by the correlation.

    Returns
    -------
    numpy.ndarray
        One drift value (pixels) per input image.
    """
    drifts = np.zeros(len(imfps))
    # current function name, used to label the progress bar
    cfn = sys._getframe().f_code.co_name
    for i in trange(len(imfps), ncols=100, ascii=False, desc="**" + cfn + "**",
                    unit=" file"):
        data = ccdproc.CCDData.read(imfps[i], unit="adu")
        data = ccdproc.subtract_bias(data, bias_temp)
        # correlate only the central 1024x1024 region; Y shift is fixed to 0
        shift, corr2d = thar_corr2d(data.data, flat_temp.data,
                                    xtrim=(512, 1536), ytrim=(512, 1536),
                                    x_shiftmax=xmax, y_shiftmax=0,
                                    verbose=False)
        # ``np.int`` was removed in NumPy 1.24; the builtin int truncates
        # identically.
        drifts[i] = int(shift[0])
    return drifts
def check_xdrift(t):
    """Measure and plot the X drift of every frame in table *t*.

    Fills t["xdrift"] per frame and marks the chosen templates in
    t["astemplate"].  Returns ``(t, fig)``.
    # NOTE(review): assumes *t* is a table with columns IMAGETYP, fps,
    # MJD-MID, xdrift and astemplate — confirm against the caller.
    """
    # check Y shift
    ind_bias = t["IMAGETYP"] == "BIAS"
    print("%s BIAS found" % np.sum(ind_bias))
    ind_flat = t["IMAGETYP"] == "FLAT"
    print("%s FLAT found" % np.sum(ind_flat))
    ind_flati2 = t["IMAGETYP"] == "FLATI2"
    print("%s FLATI2 found" % np.sum(ind_flati2))
    ind_thar = t["IMAGETYP"] == "THAR"
    print("%s THAR found" % np.sum(ind_thar))
    ind_star = t["IMAGETYP"] == "STAR"
    print("%s STAR found" % np.sum(ind_star))
    # Y shift
    # row indices of each frame type
    sub_flat = np.where(ind_flat)[0]
    sub_flati2 = np.where(ind_flati2)[0]
    sub_star = np.where(ind_star)[0]
    sub_thar = np.where(ind_thar)[0]
    sub_bias = np.where(ind_bias)[0]
    if len(sub_flat) > 0:
        # if there is a flat
        t["astemplate"][sub_flat[0]] = True
        flat_temp = ccdproc.CCDData.read(t["fps"][sub_flat[0]], unit="adu")
    else:
        # no flat, use star instead
        t["astemplate"][sub_star[0]] = True
        flat_temp = ccdproc.CCDData.read(t["fps"][sub_star[0]], unit="adu")
    if len(sub_bias) > 0:
        # if there is a bias
        t["astemplate"][sub_bias[0]] = True
        bias_temp = ccdproc.CCDData.read(t["fps"][sub_bias[0]], unit="adu")
    else:
        # use 300 as bias instead
        bias_temp = ccdproc.CCDData(np.ones((2048, 2048)) * 300, unit="adu")
    # bias-correct the template before correlating against it
    flat_temp = ccdproc.subtract_bias(flat_temp, bias_temp)
    # plot figure
    fig = plt.figure(figsize=(11, 5))
    ax = fig.add_subplot(111)
    # measure each frame type in turn and record its drifts in the table
    sub = sub_flati2
    print("----- FLATI2 -----")
    if len(sub) > 0:
        drifts = measure_xshift_2dcorr(t["fps"][sub], bias_temp, flat_temp,
                                       xmax=12)
        ax.plot(t["MJD-MID"][sub], drifts, "o", ms=20, mec="r", mfc="None",
                label="FLATI2x%s" % len(sub))
        for i, this_drift in zip(sub, drifts):
            t["xdrift"][i] = this_drift
    sub = sub_flat
    print("------ FLAT ------")
    if len(sub) > 0:
        drifts = measure_xshift_2dcorr(t["fps"][sub], bias_temp, flat_temp,
                                       xmax=12)
        ax.plot(t["MJD-MID"][sub], drifts, "o", ms=20, mec="b", mfc="None",
                label="FLATx%s" % len(sub))
        for i, this_drift in zip(sub, drifts):
            t["xdrift"][i] = this_drift
    sub = sub_thar
    print("------ THAR ------")
    if len(sub) > 0:
        drifts = measure_xshift_2dcorr(t["fps"][sub], bias_temp, flat_temp,
                                       xmax=12)
        ax.plot(t["MJD-MID"][sub], drifts, "^", ms=20, mec="k", mfc="None",
                label="THARx%s" % len(sub))
        for i, this_drift in zip(sub, drifts):
            t["xdrift"][i] = this_drift
    sub = sub_star
    print("------ STAR ------")
    if len(sub) > 0:
        drifts = measure_xshift_2dcorr(t["fps"][sub], bias_temp, flat_temp,
                                       xmax=12)
        ax.plot(t["MJD-MID"][sub], drifts, "s-", c="c", ms=20, mec="c",
                mfc="None", label="STARx%s" % len(sub))
        for i, this_drift in zip(sub, drifts):
            t["xdrift"][i] = this_drift
    # save figure
    plt.grid()
    l = plt.legend()
    l.set_frame_on(False)
    # fig.set_figheight(6)
    # fig.set_figwidth(12)
    plt.xlabel("MJD-MID")
    plt.ylabel("DRIFT/pixel")
    fig.tight_layout()
    # pad the Y range and clip X to ~1.4 days from the first point
    ylim = ax.get_ylim()
    plt.ylim(ylim[0] - 2, ylim[1] + 2)
    xlim = ax.get_xlim()
    plt.xlim(xlim[0], xlim[0] + 1.4)
    return t, fig
#
# figpath_pdf = "/hydrogen/song/figs/Xdrift_%s.pdf" % day
# figpath_png = "/hydrogen/song/figs/Xdrift_%s.png" % day
#
# fig.savefig(figpath_pdf)
# print(figpath_pdf)
# fig.savefig(figpath_png)
# print(figpath_png)
#
# # %%
# figpath_fits = "/hydrogen/song/figs/t_%s.fits" % day
#
# t.write(figpath_fits, format="fits", overwrite=True)
| bsd-3-clause |
arjunkhode/ASP | lectures/05-Sinusoidal-model/plots-code/spectral-peaks.py | 22 | 1159 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')

# analysis parameters: FFT size, window size, peak threshold (dB)
N = 512
M = 511
t = -60
w = np.hamming(M)
# Frame start sample: must be an int for slicing — the original used the
# float ``.8*fs``, which breaks under Python 3.
start = int(.8 * fs)
hN = N // 2        # half FFT size (kept for reference; unused below)
hM = (M + 1) // 2  # half window size (kept for reference; unused below)

x1 = x[start:start + M]
mX, pX = DFT.dftAnal(x1, w, N)
# locate spectral peaks above the threshold
ploc = UF.peakDetection(mX, t)
pmag = mX[ploc]
freqaxis = fs * np.arange(mX.size) / float(N)

plt.figure(1, figsize=(9.5, 5.5))
plt.subplot(2, 1, 1)
plt.plot(freqaxis, mX, 'r', lw=1.5)
plt.axis([300, 2500, -70, max(mX)])
plt.plot(fs * ploc / N, pmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')

plt.subplot(2, 1, 2)
plt.plot(freqaxis, pX, 'c', lw=1.5)
plt.axis([300, 2500, 6, 14])
plt.plot(fs * ploc / N, pX[ploc], marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')

plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
chriscrosscutler/scikit-image | doc/examples/plot_blob.py | 18 | 2796 | """
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maxima in this cube. Detecting
larger blobs is especially slower because of larger kernel sizes during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of LoG approach. In this case the image is
blurred with increasing standard deviations and the difference between
two successively blurred images are stacked up in a cube. This method
suffers from the same disadvantage as LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray

# Crop of the Hubble eXtreme Deep Field; each bright dot is a star or galaxy.
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)

# Laplacian of Gaussian: third column holds sigma; convert it to a radius.
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
blobs_log[:, 2] *= sqrt(2)

# Difference of Gaussian: same sigma-to-radius conversion.
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] *= sqrt(2)

# Determinant of Hessian: no conversion needed.
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)

detections = [
    (blobs_log, 'yellow', 'Laplacian of Gaussian'),
    (blobs_dog, 'lime', 'Difference of Gaussian'),
    (blobs_doh, 'red', 'Determinant of Hessian'),
]

# One figure per detector, circling each detected blob on the input image.
for blobs, color, title in detections:
    fig, ax = plt.subplots(1, 1)
    ax.set_title(title)
    ax.imshow(image, interpolation='nearest')
    for y, x, r in blobs:
        ax.add_patch(plt.Circle((x, y), r, color=color, linewidth=2,
                                fill=False))

plt.show()
| bsd-3-clause |
fabianp/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))

n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)

# Linear transform that stretches/rotates the blobs (anisotropic data).
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)

# Blobs with unequal standard deviations.
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)

# Unevenly sized blobs: 500 / 100 / 10 points per cluster.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))

# (subplot, data, n_clusters, title) for each of the four demonstrations
panels = [
    (221, X, 2, "Incorrect Number of Blobs"),
    (222, X_aniso, 3, "Anisotropicly Distributed Blobs"),
    (223, X_varied, 3, "Unequal Variance"),
    (224, X_filtered, 3, "Unevenly Sized Blobs"),
]

for subplot_id, dataset, n_clusters, title in panels:
    labels = KMeans(n_clusters=n_clusters,
                    random_state=random_state).fit_predict(dataset)
    plt.subplot(subplot_id)
    plt.scatter(dataset[:, 0], dataset[:, 1], c=labels)
    plt.title(title)

plt.show()
| bsd-3-clause |
istellartech/OpenTsiolkovsky | bin/stat_datapoint_extend.py | 1 | 3877 | #!/usr/bin/python
import numpy as np
import pandas as pd
import os
import json
import sys
import multiprocessing as mp
def read_data_points(arg):
    """Worker: fetch this process's share of result CSVs and sample them.

    *arg* is a list ``[id_proc, Nproc, input_directory, input_file_template,
    number_of_sample, sample_points]``.  Cases are split as evenly as
    possible over the ``Nproc`` workers; each downloaded CSV is cleaned of
    trailing commas, sampled at the times listed in ``sample_points``, and
    removed again.  Returns one CSV-text buffer per sample point.
    """
    [id_proc, Nproc, input_directory, input_file_template,
     number_of_sample, sample_points] = arg
    fo_buff = ["" for _ in range(len(sample_points.keys()))]

    # divide number_of_sample cases evenly: quotient plus spread remainder
    shou = int(number_of_sample / Nproc)
    amari = number_of_sample - shou * Nproc
    start_index = shou * id_proc + min(amari, id_proc)
    end_index = shou * (id_proc + 1) + min(amari, id_proc + 1)

    tmp_csv = "tmp_{}.csv".format(id_proc)
    for case_index in range(start_index, end_index):
        caseNo = case_index + 1
        filename = input_file_template.format(caseNo)
        os.system("aws s3 cp " + input_directory + filename + " . > /dev/null")
        if id_proc == 0: print("{0:}/{1:}".format(caseNo, end_index))
        # The download may have failed; skip missing files.  (The original
        # used a bare ``except:`` — catch only file-open errors instead.)
        try:
            fp = open(filename)
        except (IOError, OSError):
            continue
        # strip the trailing comma from each row so pandas parses it cleanly
        ft = open(tmp_csv, "w")
        for line in fp:
            ft.write(line.replace(",\n", "\n"))
        fp.close()
        ft.close()
        # fetch data
        df = pd.read_csv(tmp_csv)
        # one buffer per sample point; the loop variable is renamed so it no
        # longer shadows the outer case loop's index
        for col, k in enumerate(sample_points.keys()):
            fo_buff[col] += str(caseNo)
            for v in sample_points[k]:
                if k == "landing_time":
                    # "landing_time" means the final row of the trajectory
                    value = df.iloc[-1][v]
                else:
                    # otherwise sample the row at time t == float(k)
                    value = df.pipe(lambda d: d[d["time(s)"] == float(k)]).iloc[0][v]
                fo_buff[col] += "," + str(value)
            fo_buff[col] += "\n"
        # remove temporary csv
        os.system("rm " + filename)
    os.system("rm " + tmp_csv)
    return fo_buff
if __name__ == "__main__":
    # Use all but one core so the machine stays responsive.
    #Nproc = 1
    Nproc = mp.cpu_count() - 1
    stat_input = "datapoint.json"
    print("IST DATAPOINT MAKER")
    print("libraries load done.")
    argv = sys.argv
    if len(argv) > 1:
        otmc_mission_name = argv[1]
    else:
        # print() form: the old Python-2 print statement was inconsistent
        # with the print() calls used everywhere else in this file.
        print("PLEASE INPUT mission_name as the command line argument.")
        exit()
    # Monte-Carlo run description (number of cases, file suffix, ...).
    os.system("aws s3 cp s3://otmc/{0:s}/raw/inp/mc.json .".format(otmc_mission_name))
    with open("mc.json") as fp:
        data = json.load(fp)
    number_of_sample = data["Ntask"]
    input_directory = "s3://otmc/" + otmc_mission_name + "/raw/output/"
    # The suffix part is formatted now; the {0:05d} case number is filled
    # per case inside read_data_points.
    input_file_template = "case{0:05d}"+"_{0:s}_dynamics_1_extend.csv".format(data["suffix"])
    # Which columns to sample, and at which times ("landing_time" = last row).
    os.system("aws s3 cp s3://otmc/{0:s}/stat/inp/datapoint.json .".format(otmc_mission_name))
    with open(stat_input) as fp:
        stat = json.load(fp)
    sample_points = stat["sample points"]
    # parallel processing: one task per worker, each covering a case slice
    pool = mp.Pool(Nproc)
    callback = pool.map(read_data_points, [(id_proc, Nproc, input_directory, input_file_template, number_of_sample, sample_points) for id_proc in range(Nproc)])
    pool.terminate()
    pool.close()
    # # debug
    # id_proc = 0
    # callback = [read_data_points((id_proc, Nproc, input_directory, input_file_template, number_of_sample, sample_points))]
    # join the per-worker buffers, preserving key order
    fo_buff = ["" for i in range(len(sample_points.keys()))]
    for i in range(Nproc):
        for j in range(len(sample_points.keys())):
            fo_buff[j] += callback[i][j]
    # write out datapoint_*.csv, one file per sample-point key
    os.system("rm output/datapoint_*.csv")
    for i, k in enumerate(sample_points.keys()):
        with open("output/datapoint_" + k + ".csv", "w") as fo:
            fo.write("caseNo," + ",".join(sample_points[k]) + "\n")  # title
            fo.write(fo_buff[i])
    os.system("aws s3 cp output s3://otmc/{0:s}/stat/output/ --exclude '*' --include 'datapoint_*.csv' --recursive".format(otmc_mission_name))
| mit |
svebk/DeepSentiBank_memex | ISIweakLabels/read_hdfs_attributes.py | 1 | 5089 | import os
import os.path as osp
import json
import pickle
import MySQLdb
import random
import datetime
import struct
from sklearn import svm
import numpy as np
# Database credentials/paths loaded from a JSON file one directory up;
# keys cover a remote ("ist") MySQL server and a local MySQL server.
global_var = json.load(open('../global_var_all.json'))
isthost=global_var['ist_db_host']
istuser=global_var['ist_db_user']
istpwd=global_var['ist_db_pwd']
istdb=global_var['ist_db_dbname']
localhost=global_var['local_db_host']
localuser=global_var['local_db_user']
localpwd=global_var['local_db_pwd']
localdb=global_var['local_db_dbname']

# Local directory holding the HDFS part-files exported for this trial.
base_hdfs_path="./trial113"
# Number of values per precomputed feature vector (read as 4 bytes each).
feature_num = 4096
#base_hdfs_path="hdfs://memex:/user/worker/crf/trial113"
def get_all_precomp_feats(feats_id):
    """Fetch precomputed feature vectors for the given feature ids.

    Writes the requested ids (packed int32) to a request file, runs the
    external ``../get_precomp_feats`` tool, then reads back
    ``feature_num`` 4-byte values per id from the result file.

    Returns an (len(feats_id), feature_num) float32 numpy array, or None
    when feats_id is empty.
    """
    precomp_featurename = 'precomp-features'
    precomp_featurefilename = 'precomp-feats.dat'
    # fill features indices
    with open(precomp_featurename, 'wb') as f_pre:
        for feat_id in feats_id:
            f_pre.write(struct.pack('i', feat_id))
    # Execute get_precomp_feats
    command = '../get_precomp_feats ' + precomp_featurename + ' ' + precomp_featurefilename
    #print command
    os.system(command)
    # Read results from precomp_featurefilename.
    # The old code accumulated raw byte strings and tested `if not feats`,
    # which is only true before the first read — every later iteration
    # vstacked undecoded bytes.  Decode each 4-byte group to float32
    # (matching the feature_num * 4 read size) and stack once at the end.
    rows = []
    with open(precomp_featurefilename, 'rb') as f_pre:
        for feat_id in feats_id:
            one_feat = f_pre.read(feature_num * 4)
            rows.append(np.frombuffer(one_feat, dtype=np.float32))
    if not rows:
        return None
    return np.vstack(rows)
# Get feature from image htid
# Beware that maybe some images where not processed because their download failed.
def get_precompfeatid_fromhtid(image_htid):
    """Look up the precomputed-feature id for an image htid.

    Returns 0 when the image has no entry in uniqueIds (e.g. its download
    failed and it was never processed).
    """
    db = MySQLdb.connect(host=localhost, user=localuser, passwd=localpwd, db=localdb)
    try:
        c = db.cursor()
        # Parameterized query: the previous version built the SQL by string
        # concatenation, which was injectable through image_htid.
        query = "select id from uniqueIds where htid=%s"
        print(query)
        c.execute(query, (image_htid,))
        remax = c.fetchall()
        print(remax)
        if len(remax):
            feat_id = int(remax[0][0])
        else:
            feat_id = 0
    finally:
        db.close()
    return feat_id
# Get all images from an ad
def getImageHtIdsFromAdId(ad_id):
    """Return the ids of all images attached to ad `ad_id` (may be empty)."""
    db = MySQLdb.connect(host=isthost, user=istuser, passwd=istpwd, db=istdb)
    try:
        c = db.cursor()
        # Parameterized query instead of string concatenation (injection-safe
        # and no manual str() conversion of ad_id).
        c.execute('select id from images where ads_id=%s', (ad_id,))
        rows = c.fetchall()
    finally:
        db.close()
    return [one_img[0] for one_img in rows]
if __name__ == "__main__":
    # First get all ads, attributes and images ids (cached in a pickle).
    if not osp.isfile('all_attr_data.pkl'):
        all_ads=[]
        all_imgs=[]
        all_vals=[]
        attr_set=set()
        attr_vals={}
        # 16 tab-separated HDFS part files: ads_id \t attribute \t value
        for part in xrange(0, 16):
            filename = base_hdfs_path+"/part-000"+"%02d" % part
            fp = open(filename)
            for line in fp:
                [ads_id,attr,val]=line.split('\t')
                # values for each ads
                all_ads.append(ads_id)
                all_imgs.append(getImageHtIdsFromAdId(ads_id))
                all_vals.append([attr,val.rstrip()])
                # set of attributes and their values
                attr_set.add(attr)
                if attr not in attr_vals.keys():
                    attr_vals[attr]=set()
                # NOTE(review): `val` is added here WITHOUT rstrip(), but
                # all_vals stores the rstripped form — the x[1]==one_val
                # comparison below can therefore never match; confirm.
                attr_vals[attr].add(val)
            fp.close()
        print len(all_ads)
        for one_attr in attr_set:
            print one_attr+":",attr_vals[one_attr]
        all_attr_data={}
        all_attr_data['all_ads']=all_ads
        all_attr_data['all_vals']=all_vals
        all_attr_data['all_imgs']=all_imgs
        all_attr_data['attr_set']=attr_set
        all_attr_data['attr_vals']=attr_vals
        pickle.dump(all_attr_data,open('all_attr_data.pkl',"wb"))
    else:
        all_attr_data=pickle.load(open('all_attr_data.pkl',"rb"))
    # Now we should have all attributes and corresponding images ids.
    # Start training SVMs: one multi-class SVM per attribute, one class
    # per attribute value.
    for one_attr in all_attr_data['attr_set']:
        print("Processing attribute {}".format(one_attr))
        pos={}
        train={}
        test={}
        labels_train=[]
        labels_test=[]
        label_id=0
        # Get all samples annotated with this attribute
        for one_val in all_attr_data['attr_vals'][one_attr]:
            print("Getting positive samples of {}".format(one_val))
            pos[one_val] = [i for i, x in enumerate(all_attr_data['all_vals']) if x[0]==one_attr and x[1]==one_val]
            # Random sample 2/3 of each value as training samples and the last 1/3 at test.
            train[one_val] = [pos[one_val][i] for i in sorted(random.sample(xrange(len(pos[one_val])), int(len(pos[one_val])*2./3)))]
            test[one_val] = list(set(pos[one_val])-set(train[one_val]))
            labels_train.extend([label_id]*len(train[one_val]))
            labels_test.extend([label_id]*len(test[one_val]))
            label_id=label_id+1
        train_feats_id=[]
        for one_val in all_attr_data['attr_vals'][one_attr]:
            for sample in train[one_val]:
                # NOTE(review): all_imgs entries are LISTS of image ids,
                # but get_precompfeatid_fromhtid expects a single htid —
                # confirm intended behavior.
                train_feats_id.append(get_precompfeatid_fromhtid(all_attr_data['all_imgs'][sample]))
        train_feats=get_all_precomp_feats(train_feats_id)
        clf = svm.SVC()
        clf.fit(train_feats, labels_train)
        # Persist the trained model and the train/test split for later eval.
        pickle.dump(clf,open('svmmodel_'+str(one_attr)+'.pkl','wb'))
        data={}
        data['train_ids']=train
        data['test_ids']=test
        data['labels_train']=labels_train
        data['labels_test']=labels_test
        pickle.dump(data,open('data_'+str(one_attr)+'.pkl','wb'))
| bsd-2-clause |
akail/fiplanner | fiplanner/fiplanner.py | 1 | 8360 | # -*- coding: utf-8 -*-
"""
fiplanner.fiplanner
~~~~~~~~~~~~~~~~~~~
This module implements the central Planning tool
:copyright: (c) 2017 Andrew Kail
:license: BSD, see LICENSE for more details
"""
import datetime as dt
import logging
import pprint
import pandas as pd
import matplotlib.pyplot as plt
from dateutil.relativedelta import relativedelta
import ruamel_yaml as yaml
from fiplanner.errors import ConfigError
from fiplanner.accounts import Debt, Targeted, Investment, Mortgage
LOG = logging.getLogger(__name__)

# A list of account types that are classified as investments
# Right now this should be temporary until I get a class for
# each account
INVESTMENTS = ['401k', '403b', 'roth ira', 'investment']

# Maps a priority-category name (as used by order_accounts) to the
# account `type` strings that belong to that category.
ACCOUNTS = {'debts': ['debt', 'mortgage'], 'targeted': ['targeted'],
            'investments': INVESTMENTS
            }

# output columns width in characters
COL_WIDTH = 12
def order_debts(debts, debt_method, **kwargs):
    """Generate yield debts in a specific order

    Orders the debt accounts based on the debt_method given.  Paid-off
    debts always come first, since their freed-up payments should be
    reused by the remaining accounts.  With 'avalanche' the highest
    interest rate comes first (larger balance breaks ties); with
    'snowball' the smallest balance comes first (higher interest breaks
    ties).

    Args:
        debts: List of Debt objects
        debt_method: 'snowball' or 'avalanche'

    Yields:
        Debt: A Debt object

    Raises:
        ConfigError: when debt_method is not a supported value.
    """
    # Extract paid off debts first. They need to be first
    paid = filter(lambda x: x.value == 0, debts)
    unpaid = filter(lambda x: x.value != 0, debts)
    # sort unpaid debts by the selected method
    if debt_method == 'avalanche':
        gen = sorted(unpaid, key=lambda x: (-x.interest, -x.value))
    elif debt_method == 'snowball':
        # BUGFIX: the old key was (-x.value, -x.interest), which put the
        # LARGEST balance first — the opposite of the snowball method and
        # of this function's documented behavior.
        gen = sorted(unpaid, key=lambda x: (x.value, -x.interest))
    else:
        raise ConfigError("Debt method %s not support" % debt_method)
    for debt in paid:
        yield debt
    for debt in gen:
        yield debt
def order_targeted(targets, target_sort='low', **kwargs):
    """Yield targeted accounts sorted by current value.

    Args:
        targets: iterable of Targeted accounts.
        target_sort: 'low' for ascending value, 'high' for descending.

    Yields:
        Targeted accounts, one at a time, in the requested order.

    Raises:
        ConfigError: when target_sort is neither 'high' nor 'low'.
    """
    if target_sort not in ('high', 'low'):
        raise ConfigError("Target ordering %s not supported" % target_sort)
    ordered = sorted(targets, key=lambda acct: acct.value,
                     reverse=(target_sort == 'high'))
    yield from ordered
def order_investments(investments, *args, **kwargs):
    """Yield investment accounts.

    Currently a pass-through: accounts come out in input order.  The
    extra *args/**kwargs exist only so the signature matches the other
    order_* helpers.

    Args:
        investments: List of Investment objects

    Yields:
        Investment: Single Investment
    """
    yield from investments
def order_accounts(accounts, priority, debt_method):
    """Yield every account, grouped and ordered by priority category.

    For each category name in `priority` ('debts', 'targeted',
    'investments') the matching module-level order_<category> helper is
    looked up, fed the accounts whose type belongs to that category
    (per the ACCOUNTS map), and its ordering is yielded through.

    Args:
        accounts: iterable of Account-derived objects.
        priority: ordered list of category-name strings.
        debt_method: debt ordering ('snowball'/'avalanche'), forwarded on.

    Yields:
        One account at a time, in overall priority order.
    """
    for category in priority:
        orderer = globals()['order_%s' % category]
        members = (acct for acct in accounts
                   if acct.type in ACCOUNTS[category])
        # NOTE(review): target_sort is pinned to 'high' here even though
        # order_targeted defaults to 'low' — confirm this is intended.
        yield from orderer(members, debt_method=debt_method,
                           target_sort='high')
class Simulation():
    """Main Simulation object

    Loads a YAML plan, builds the account objects it describes, and sets
    up a month-indexed DataFrame spanning next month through the
    configured maximum age.

    Args:
        config: YAML Configuration file
    """

    def __init__(self, config):
        # read in the configuration file
        try:
            with open(config, 'r') as infile:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary objects; acceptable only for a
                # trusted local plan file.
                self.config = yaml.load(infile)
        except FileNotFoundError:
            LOG.error("Configuration file %s not found", config)
            raise
        now = dt.datetime.now()
        start = dt.date(year=now.year, month=now.month,
                        day=1)
        # start next month since we are worried about future calculations
        start += relativedelta(months=1)
        birthday = dt.datetime.strptime(self.config['profile']['birthday'], '%m-%d-%Y')
        self.config['profile']['birthday'] = birthday
        # Current age, corrected down by one when the birthday has not yet
        # occurred this year.
        self.config['age'] = now.year - birthday.year - \
            ((now.month, now.day) < (birthday.month, birthday.day))
        age_max = self.config['simulation']['age_max']
        stop = (birthday + relativedelta(years=(age_max))).date()
        self.config['simulation']['start'] = start
        self.config['simulation']['stop'] = stop
        self.accounts = [self.account_factory(**account) for account in self.config['accounts']]
        # initialize dataframe: one row per month ('MS' = month start),
        # one column per account, all balances zeroed.
        index = pd.date_range(start, stop, freq='MS')
        nrows = len(index)  # NOTE(review): unused
        columns = [account.name for account in self.accounts]
        self.df = pd.DataFrame(index=index, columns=columns)
        self.df.fillna(0, inplace=True)
        self.format_summary_input()

    def run(self):
        """Run the plan"""
        LOG.info("Starting simulation on %s", self.config['simulation']['start'])
        priority = self.config['simulation']['priority']
        debt_method = self.config['simulation']['debt_method']
        for month in self.df.index:
            extra = self.config['profile']['extra']
            # Payments from debts whose balance is non-negative are added
            # back into the monthly extra pool.
            # NOTE(review): the test is value >= 0 — confirm debts carry
            # negative balances until payoff.
            for account in filter(lambda a: a.type == 'debt', self.accounts):
                if account.value >= 0:
                    extra += account.payment
            # Feed each account in priority order; whatever one account
            # does not consume is passed to the next.
            for account in order_accounts(self.accounts, priority, debt_method):
                LOG.debug("\tRunning account %s", account.name)
                extra = account.make_payment(extra)
                # NOTE(review): chained indexing (.loc[month][name]) may
                # trigger pandas SettingWithCopy behavior; verify writes
                # actually land in self.df.
                self.df.loc[month][account.name] = account.value
        self.df['networth'] = self.df.sum(axis=1)
        print(self.df.head())
        LOG.info("Ending simulation")

    def generate_graphs(self):
        """Generate graphs showing net worth"""
        self.df['networth'].plot()
        plt.show()

    def format_summary_input(self):
        # Pretty-print the fully resolved configuration into the log.
        ppout = pprint.PrettyPrinter(indent=4)
        LOG.info(ppout.pformat(self.config))

    def account_factory(self, **kwargs):
        """ Build the account

        Parameters
        ----------
        **kwargs:
            Arbirtrary Arguments

        Returns
        -------
        account:
            The appropriate account subclass

        Raises
        ------
        ConfigError
            When 'name' or 'type' is missing, or the type is unknown.
        """
        try:
            name = kwargs['name']
        except KeyError:
            raise ConfigError("An account's name has not been defined")
        try:
            _type = kwargs['type']
        except KeyError:
            raise ConfigError("Account type not defined for %s" % name)
        # All investment-like accounts share one configured return rate.
        returns = self.config['simulation']['interests']['investments']
        LOG.debug("Adding account %s", name)
        if _type == 'debt':
            return Debt(**kwargs)
        elif _type == 'mortgage':
            return Mortgage(**kwargs)
        elif _type == 'targeted':
            return Targeted(**kwargs)
        elif _type == 'roth ira':
            return Investment(**kwargs, interest=returns)
        elif _type == '401k':
            return Investment(**kwargs, interest=returns)
        elif _type == '403b':
            return Investment(**kwargs, interest=returns)
        elif _type == 'investment':
            return Investment(**kwargs, interest=returns)
        else:
            raise ConfigError("Type %s doesn't exist" % _type)
| bsd-3-clause |
mohsinjuni/androguard | elsim/elsim/elsim.py | 37 | 16175 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2

# Root "elsim" logger plus per-concern child loggers; all share one
# stream handler with a "LEVEL: message" format.
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime")          # logs at runtime
log_interactive = logging.getLogger("elsim.interactive")  # logs in interactive functions
log_loading = logging.getLogger("elsim.loading")          # logs when loading
def set_debug():
    """Switch the shared "elsim" logger to DEBUG verbosity."""
    log_elsim.setLevel( logging.DEBUG )

def get_debug():
    """Return True when the shared "elsim" logger is at DEBUG level."""
    return log_elsim.getEffectiveLevel() == logging.DEBUG

def warning(x):
    """Log x through the runtime logger at WARNING level."""
    log_runtime.warning(x)
def error(x):
    """Log x through the runtime logger at ERROR level, then raise.

    The previous body ended with ``raise()``, which is not a re-raise:
    it tries to raise the (non-exception) result of an empty call and
    always fails with a TypeError, masking the real message.  Raise a
    RuntimeError carrying the message instead.
    """
    log_runtime.error(x)
    raise RuntimeError(x)
def debug(x):
    """Log x through the runtime logger at DEBUG level."""
    log_runtime.debug(x)
from similarity.similarity import *
# Keys into the user-supplied filter configuration F passed to Elsim;
# each names a callable/object customizing one stage of the diff.
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"   # function building an element from a raw item and its container
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH"           # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH"         # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE"       # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH"   # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity

# Keys into Elsim.filters, the internal result/bookkeeping dict.
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
    """Nearest-neighbor prefilter over element checksum entropies.

    Fits a 2-feature NearestNeighbors model on
    (signature_entropy, entropy) pairs: row 0 is the query element x,
    the following rows are the candidate elements ys.
    """

    def __init__(self, x, ys):
        # Imported lazily so elsim works without sklearn unless this
        # prefilter is actually used.
        import numpy as np
        from sklearn.neighbors import NearestNeighbors

        #print x, ys
        CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
        #print CI, x.get_info()
        #print

        # Stack one (signature_entropy, entropy) row per candidate.
        for i in ys:
            CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )

        #idx = 0
        #for i in np.array(CI)[1:]:
        #    print idx+1, i, ys[idx].get_info()
        #    idx += 1

        self.neigh = NearestNeighbors(2, 0.4)
        self.neigh.fit(np.array(CI))
        #print self.neigh.kneighbors( CI[0], len(CI) )

        self.CI = CI
        self.ys = ys

    def cmp_elements(self):
        """Return up to 4 candidates nearest to x (row 0 excluded)."""
        z = self.neigh.kneighbors( self.CI[0], 5 )
        l = []

        cmp_values = z[0][0]
        cmp_elements = z[1][0]
        idx = 1
        # Skip index 0 (the query itself); neighbor index i maps to
        # candidate ys[i - 1] because row 0 of CI is the query.
        for i in cmp_elements[1:]:
            #if cmp_values[idx] > 1.0:
            #    break
            #print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
            l.append( self.ys[ i - 1 ] )
            idx += 1

        return l
def split_elements(el, els):
    """Map each item of `els` to el's associated element for that item."""
    return {item: el.get_associated_element(item) for item in els}
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
    """Compare two element containers e1 and e2.

    All behavior is driven by the filter configuration F (see the
    FILTER_* keys above): how raw items become elements, how they are
    checksummed, compared, sorted and filtered.  After construction the
    elements are bucketed into identical / similar / new / deleted /
    skipped sets, queryable through the get_* methods, and an overall
    similarity percentage is available via get_similarity_value().

    T overrides FILTER_SORT_VALUE; C picks a compressor from
    H_COMPRESSOR; libnative/libpath configure the SIMILARITY backend.
    """

    def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
        self.e1 = e1
        self.e2 = e2
        self.F = F
        self.compressor = SNAPPY_COMPRESS

        set_debug()

        if T != None:
            self.F[ FILTER_SORT_VALUE ] = T

        # Passing a string as libnative is shorthand for "native lib at
        # this path".
        if isinstance(libnative, str):
            libpath = libnative
            libnative = True

        self.sim = SIMILARITY( libpath, libnative )

        if C != None:
            if C in H_COMPRESSOR:
                self.compressor = H_COMPRESSOR[ C ]
            self.sim.set_compress_type( self.compressor )
        else:
            self.sim.set_compress_type( self.compressor )

        # The whole analysis pipeline runs at construction time.
        self.filters = {}
        self._init_filters()
        self._init_index_elements()
        self._init_similarity()
        self._init_sort_elements()
        self._init_new_elements()

    def _init_filters(self):
        """Create the empty bookkeeping/result structures."""
        self.filters = {}
        self.filters[ BASE ] = {}
        self.filters[ BASE ].update( self.F )
        self.filters[ ELEMENTS ] = {}
        self.filters[ HASHSUM ] = {}
        self.filters[ IDENTICAL_ELEMENTS ] = set()
        self.filters[ SIMILAR_ELEMENTS ] = []
        self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
        self.filters[ NEW_ELEMENTS ] = set()
        self.filters[ HASHSUM_NEW_ELEMENTS ] = []
        self.filters[ DELETED_ELEMENTS ] = []
        self.filters[ SKIPPED_ELEMENTS ] = []

        self.filters[ ELEMENTS ][ self.e1 ] = []
        self.filters[ HASHSUM ][ self.e1 ] = []

        self.filters[ ELEMENTS ][ self.e2 ] = []
        self.filters[ HASHSUM ][ self.e2 ] = []

        self.filters[ SIMILARITY_ELEMENTS ] = {}
        self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}

        # Per-container hash set, hash -> representative element, and
        # hash -> all elements sharing that hash.
        self.set_els = {}
        self.ref_set_els = {}
        self.ref_set_ident = {}

    def _init_index_elements(self):
        self.__init_index_elements( self.e1, 1 )
        self.__init_index_elements( self.e2 )

    def __init_index_elements(self, ce, init=0):
        """Wrap, checksum and index every element of container ce.

        NOTE(review): the `init` argument is currently unused.
        """
        self.set_els[ ce ] = set()
        self.ref_set_els[ ce ] = {}
        self.ref_set_ident[ce] = {}

        for ae in ce.get_elements():
            e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )

            # Elements rejected by the skip filter are recorded and ignored.
            if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
                self.filters[ SKIPPED_ELEMENTS ].append( e )
                continue

            self.filters[ ELEMENTS ][ ce ].append( e )
            fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
            e.set_checksum( fm )

            sha256 = e.getsha256()
            self.filters[ HASHSUM ][ ce ].append( sha256 )

            # First occurrence of a hash becomes the representative;
            # duplicates are all kept in ref_set_ident.
            if sha256 not in self.set_els[ ce ]:
                self.set_els[ ce ].add( sha256 )
                self.ref_set_els[ ce ][ sha256 ] = e

                self.ref_set_ident[ce][sha256] = []
            self.ref_set_ident[ce][sha256].append(e)

    def _init_similarity(self):
        """Bucket identical elements; score e1 elements against e2-only ones."""
        intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
        difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )

        self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
        available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]

        # Check if some elements in the first file has been modified
        for j in self.filters[ELEMENTS][self.e1]:
            self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
            #debug("SIM FOR %s" % (j.get_info()))
            if j.getsha256() not in self.filters[HASHSUM][self.e2]:
                #eln = ElsimNeighbors( j, available_e2_elements )
                #for k in eln.cmp_elements():
                for k in available_e2_elements:
                    #debug("%s" % k.get_info())
                    self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )

                if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
                    self.filters[SIMILAR_ELEMENTS].append(j)
                    self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )

    def _init_sort_elements(self):
        """Keep each similar element's best-sorted partners; an element
        with no partner left is reclassified as deleted."""
        deleted_elements = []
        for j in self.filters[SIMILAR_ELEMENTS]:
            #debug("SORT FOR %s" % (j.get_info()))
            sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
            self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )

            ret = True
            if sort_h == []:
                ret = False

            if ret == False:
                deleted_elements.append( j )

        for j in deleted_elements:
            self.filters[ DELETED_ELEMENTS ].append( j )
            self.filters[ SIMILAR_ELEMENTS ].remove( j )

    def __checksort(self, x, y):
        # True when y is one of x's retained (sorted) similar partners.
        return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]

    def _init_new_elements(self):
        # Check if some elements in the second file are totally new !
        for j in self.filters[ELEMENTS][self.e2]:

            # new elements can't be in similar elements
            if j not in self.filters[SIMILAR_ELEMENTS]:
                # new elements hashes can't be in first file
                if j.getsha256() not in self.filters[HASHSUM][self.e1]:
                    ok = True
                    # new elements can't be compared to another one
                    for diff_element in self.filters[SIMILAR_ELEMENTS]:
                        if self.__checksort( diff_element, j ):
                            ok = False
                            break

                    if ok:
                        if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
                            self.filters[NEW_ELEMENTS].add( j )
                            self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )

    def get_similar_elements(self):
        """ Return the similar elements
            @rtype : a list of elements
        """
        return self.get_elem( SIMILAR_ELEMENTS )

    def get_new_elements(self):
        """ Return the new elements
            @rtype : a list of elements
        """
        return self.get_elem( NEW_ELEMENTS )

    def get_deleted_elements(self):
        """ Return the deleted elements
            @rtype : a list of elements
        """
        return self.get_elem( DELETED_ELEMENTS )

    def get_internal_identical_elements(self, ce):
        """ Return the internal identical elements
            @rtype : a list of elements
        """
        return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )

    def get_identical_elements(self):
        """ Return the identical elements
            @rtype : a list of elements
        """
        return self.get_elem( IDENTICAL_ELEMENTS )

    def get_skipped_elements(self):
        """Return the elements rejected by the skip filter."""
        return self.get_elem( SKIPPED_ELEMENTS )

    def get_elem(self, attr):
        # Shallow copy of the requested bucket as a plain list.
        return [ x for x in self.filters[attr] ]

    def show_element(self, i, details=True):
        """Print element i and, optionally, its identical/similar partners."""
        print "\t", i.get_info()

        if details:
            if i.getsha256() == None:
                pass
            elif i.getsha256() in self.ref_set_els[self.e2]:
                # Identical match(es) in e2.
                if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
                    for ident in self.ref_set_ident[self.e2][i.getsha256()]:
                        print "\t\t-->", ident.get_info()
                else:
                    print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
            else:
                # Similar partners with their similarity values.
                for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
                    print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]

    def get_element_info(self, i):
        """Return [i, identical] pairs or [i, partner, value] triples for i."""
        l = []

        if i.getsha256() == None:
            pass
        elif i.getsha256() in self.ref_set_els[self.e2]:
            l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
        else:
            for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
                l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
        return l

    def get_associated_element(self, i):
        # NOTE(review): SIMILARITY_SORT_ELEMENTS[i] is a set, so "first"
        # is an arbitrary member — confirm a deterministic best match is
        # not required here.
        return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]

    def get_similarity_value(self, new=True):
        """Return an overall similarity percentage (0..100).

        Recomputes each similar pair's value under BZ2 compression,
        counts identical elements as 0.0 (maximal similarity) and new
        (or, with new=False, deleted) elements as 1.0, then averages
        1.0 - value over everything.  The configured compressor is
        restored before returning.
        """
        values = []

        self.sim.set_compress_type( BZ2_COMPRESS )
        for j in self.filters[SIMILAR_ELEMENTS]:
            k = self.get_associated_element( j )
            value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
            # filter value
            value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
            values.append( value )

        values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
        if new == True:
            values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
        else:
            values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )

        self.sim.set_compress_type( self.compressor )

        similarity_value = 0.0
        for i in values:
            similarity_value += (1.0 - i)

        if len(values) == 0:
            return 0.0

        return (similarity_value/len(values)) * 100

    def show(self):
        """Print a one-line count summary of every bucket."""
        print "Elements:"
        print "\t IDENTICAL:\t", len(self.get_identical_elements())
        print "\t SIMILAR: \t", len(self.get_similar_elements())
        print "\t NEW:\t\t", len(self.get_new_elements())
        print "\t DELETED:\t", len(self.get_deleted_elements())
        print "\t SKIPPED:\t", len(self.get_skipped_elements())
        #self.sim.show()
#self.sim.show()
# Keys into Eldiff.filters.
# NOTE(review): DELETED_ELEMENTS here shadows the "deletedelements" value
# defined above for Elsim; all lookups happen after the module is fully
# loaded so usage stays self-consistent, but the duplicate name is fragile.
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
    """Line-level diff over the element pairs produced by an Elsim.

    For every (i, j) pair from elsim.get_elements() the F[DIFF] callable
    computes added/deleted sub-elements; results are kept per pair and
    linked j -> i.
    """

    def __init__(self, elsim, F):
        self.elsim = elsim
        self.F = F

        self._init_filters()
        self._init_diff()

    def _init_filters(self):
        """Create the empty result structures."""
        self.filters = {}

        self.filters[ BASE ] = {}
        self.filters[ BASE ].update( self.F )
        self.filters[ ELEMENTS ] = {}
        self.filters[ ADDED_ELEMENTS ] = {}
        self.filters[ DELETED_ELEMENTS ] = {}
        self.filters[ LINK_ELEMENTS ] = {}

    def _init_diff(self):
        # NOTE(review): assumes the Elsim passed in exposes
        # get_elements() yielding (i, j) pairs — confirm; the Elsim class
        # in this module does not define that method itself.
        for i, j in self.elsim.get_elements():
            self.filters[ ADDED_ELEMENTS ][ j ] = []
            self.filters[ DELETED_ELEMENTS ][ i ] = []

            x = self.filters[ BASE ][ DIFF ]( i, j )

            self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
            self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )

            self.filters[ LINK_ELEMENTS ][ j ] = i
            #self.filters[ LINK_ELEMENTS ][ i ] = j

    def show(self):
        """Print every linked pair with its added and deleted elements."""
        for bb in self.filters[ LINK_ELEMENTS ] : #print "la"
            print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()

            print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
            for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
                print "\t",
                i.show()

            print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
            for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
                print "\t",
                i.show()
            print

    def get_added_elements(self):
        """Return the j -> [added sub-elements] mapping."""
        return self.filters[ ADDED_ELEMENTS ]

    def get_deleted_elements(self):
        """Return the i -> [deleted sub-elements] mapping."""
        return self.filters[ DELETED_ELEMENTS ]
| apache-2.0 |
scikit-multilearn/scikit-multilearn | skmultilearn/ensemble/tests/test_rakeld.py | 1 | 1966 | import unittest
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from skmultilearn.ensemble import RakelD
from skmultilearn.tests.classifier_basetest import ClassifierBaseTest
# Label-space partition size used by every classifier under test.
TEST_LABELSET_SIZE = 3


class RakelDTest(ClassifierBaseTest):
    """Runs the inherited ClassifierBaseTest checks on RakelD with both a
    sparse-capable base classifier (SVC) and a dense one (GaussianNB)."""

    def get_rakeld_with_svc(self):
        # probability=True so the predict-probabilities check can run.
        # NOTE(review): require_dense=[False, True] presumably keeps X
        # sparse while densifying y — confirm against RakelD docs.
        return RakelD(
            base_classifier=SVC(probability=True),
            base_classifier_require_dense=[False, True],
            labelset_size=TEST_LABELSET_SIZE
        )

    def get_rakeld_with_nb(self):
        # GaussianNB requires dense input for both X and y.
        return RakelD(
            base_classifier=GaussianNB(),
            base_classifier_require_dense=[True, True],
            labelset_size=TEST_LABELSET_SIZE
        )

    def test_if_sparse_classification_works_on_non_dense_base_classifier(self):
        classifier = self.get_rakeld_with_svc()

        self.assertClassifierWorksWithSparsity(classifier, 'sparse')
        self.assertClassifierPredictsProbabilities(classifier, 'sparse')

    def test_if_dense_classification_works_on_non_dense_base_classifier(self):
        classifier = self.get_rakeld_with_svc()

        self.assertClassifierWorksWithSparsity(classifier, 'dense')
        self.assertClassifierPredictsProbabilities(classifier, 'dense')

    def test_if_sparse_classification_works_on_dense_base_classifier(self):
        classifier = self.get_rakeld_with_nb()

        self.assertClassifierWorksWithSparsity(classifier, 'sparse')
        self.assertClassifierPredictsProbabilities(classifier, 'sparse')

    def test_if_dense_classification_works_on_dense_base_classifier(self):
        classifier = self.get_rakeld_with_nb()

        self.assertClassifierWorksWithSparsity(classifier, 'dense')
        self.assertClassifierPredictsProbabilities(classifier, 'dense')

    def test_if_works_with_cross_validation(self):
        classifier = self.get_rakeld_with_nb()

        self.assertClassifierWorksWithCV(classifier)
# Allow running this test module directly with `python test_rakeld.py`.
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
PythonBootCampIAG-USP/NASA_PBC2015 | Day_01/05_Basemap/bcbm1.py | 2 | 10253 | #!/usr/bin/env python
# Purpose : Python Boot Camp - Basemap Teaching Program 1.
# Ensure that environment variable PYTHONUNBUFFERED=yes
# This allows STDOUT and STDERR to both be logged in chronological order
import sys # platform, args, run tools
import os # platform, args, run tools
import argparse # For parsing command line
import datetime # For date/time processing
import numpy as np
import h5py
import matplotlib as mpl
mpl.use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show, subplots
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm as bm_cm
import matplotlib.cm as mpl_cm
#########################################################################
# Command Line Parameters Class
#########################################################################
class Bcbm1CP():
    """Command-line parameter parser for the Basemap teaching program."""

    def bcbm1_cp(self, bcbm1_cmd_line):
        """Parse bcbm1_cmd_line into self.args; always returns 0.

        Recognized flags:
            -v / --verbose    echo processing messages to STDOUT
            -t / --test_mode  run the program in test mode
        """
        parser = argparse.ArgumentParser(
            description="Python Boot Camp - Basemap Teaching Program 1")
        parser.add_argument(
            "-v", "--verbose",
            action="store_true",
            default=False,
            dest="verbose",
            help="Display processing messages to STDOUT (DEFAULT=NO)")
        parser.add_argument(
            "-t", "--test_mode",
            action="store_true",
            default=False,
            dest="test_mode",
            help="Run program in test mode (DEFAULT=NO)")
        self.args = parser.parse_args(bcbm1_cmd_line)

        if self.args.verbose:
            sys.stdout.write(
                "BCBM1 : bcbm1_cmd_line = " + str(bcbm1_cmd_line) + "\n")

        # Return
        return 0
#########################################################################
# Main Program
#########################################################################
class Bcbm1():
    def bcbm1(self, bcbm1_cmd_line):
        """Program driver: parse args, set up state, draw the map(s).

        Returns 0 on success, or the non-zero return of the first step
        that fails.
        """
        # Start time
        self.start_time = datetime.datetime.today()
        # Parse input parameters from cmd line
        bcbm1_cp1 = Bcbm1CP()
        bcbm1_cp1_ret = bcbm1_cp1.bcbm1_cp(bcbm1_cmd_line)
        self.bcbm1_cmd_line = bcbm1_cmd_line
        # An empty command line is recorded as a single space.
        if (len(self.bcbm1_cmd_line) == 0):
            self.bcbm1_cmd_line = " "
        if (bcbm1_cp1_ret):
            return(bcbm1_cp1_ret)
        self.verbose = bcbm1_cp1.args.verbose
        self.test_mode = bcbm1_cp1.args.test_mode
        # Test mode uses a fixed timestamp string so output is reproducible.
        if (self.test_mode):
            self.timestamp = "Test Mode Date/Time Stamp"
            if (self.verbose):
                sys.stdout.write("BCBM1 : Running in test mode\n")
                sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
        else:
            self.timestamp = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
            if (self.verbose):
                sys.stdout.write("BCBM1 : Program started : " + str(self.start_time) + "\n")
                sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
        if (self.verbose):
            sys.stdout.write("BCBM1 : sys.version = " + str(sys.version) + "\n")
            sys.stdout.write("BCBM1 : self.verbose = " + str(self.verbose) + "\n")
            sys.stdout.write("BCBM1 : self.test_mode = " + str(self.test_mode) + "\n")
        # Call functions (map 2 and 3 are currently disabled)
        bcbm1_f11_ret = self.display_map1()
        if (bcbm1_f11_ret):
            return(bcbm1_f11_ret)
        #bcbm1_f21_ret = self.display_map2()
        #if (bcbm1_f21_ret):
        #    return(bcbm1_f21_ret)
        #bcbm1_f31_ret = self.display_map3()
        #if (bcbm1_f31_ret):
        #    return(bcbm1_f31_ret)
        # End program
        self.end_time = datetime.datetime.today()
        self.run_time = self.end_time - self.start_time
        if (self.verbose):
            if (self.test_mode):
                # Timestamps are suppressed in test mode for reproducibility.
                pass
            else:
                sys.stdout.write("BCBM1 : Program ended : " + str(self.end_time) + "\n")
                sys.stdout.write("BCBM1 : Run time : " + str(self.run_time) + "\n")
        if (self.verbose):
            sys.stdout.write("BCBM1 : Program completed normally\n")
        return(0)
# Define functions
#------------------------------------------------------------------------------
def display_map1(self):
if (self.verbose):
sys.stdout.write("BCBM1 : display_map1 ACTIVATED\n")
# Set up figure in Matplotlib
self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
self.current_figure.suptitle("Basemap - First Map\n" +
self.timestamp)
self.current_figure.text(0.05, 0.95, "Mollweide Projection")
self.current_figure.subplots_adjust(left=0.05,
right=0.95,
top=0.80,
bottom=0.05,
wspace=0.2,
hspace=0.4)
self.current_plot = self.current_figure.add_subplot(1, 1, 1)
# Plot figure
self.map = Basemap(projection='moll',
lon_0=0,
#lat_0=0,
resolution='c')
#self.map.drawmapboundary(fill_color='aqua')
#self.map.fillcontinents(color='coral',lake_color='aqua')
self.map.drawcoastlines()
#self.map.drawcountries()
#self.map.drawrivers()
#self.map.drawstates()
self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
# Write the output to a graphic file
self.current_figure.savefig("bcbm1_plot1")
mpl.pyplot.close(self.current_figure)
return(0)
#------------------------------------------------------------------------------
def display_map2(self):
if (self.verbose):
sys.stdout.write("BCBM1 : display_map2 ACTIVATED\n")
# Set up figure in Matplotlib
self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
self.current_figure.suptitle("Basemap - Second Map\n" +
self.timestamp)
self.current_figure.text(0.05, 0.95, "Robinson Projection - Blue Marble")
self.current_figure.subplots_adjust(left=0.05,
right=0.95,
top=0.80,
bottom=0.05,
wspace=0.2,
hspace=0.4)
self.current_plot = self.current_figure.add_subplot(1, 1, 1)
# Plot figure
self.map = Basemap(projection='robin',
lon_0=0,
lat_0=0,
resolution='c')
#self.map.drawcoastlines()
#self.map.drawcountries()
#self.map.drawrivers()
#self.map.drawstates()
self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
self.map.bluemarble() # Known bug here - may appear upside down
# Write the output to a graphic file
self.current_figure.savefig("bcbm1_plot2")
mpl.pyplot.close(self.current_figure)
return(0)
#------------------------------------------------------------------------------
def display_map3(self):
if (self.verbose):
sys.stdout.write("BCBM1 : display_map3 ACTIVATED\n")
# Set up figure in Matplotlib
self.current_figure = mpl.pyplot.figure(1, figsize=(14.0, 10.0))
self.current_figure.suptitle("Basemap - Third Map\n" +
self.timestamp)
self.current_figure.text(0.05, 0.95, "Near-Sided Perspective Projection - Different Colours")
self.current_figure.subplots_adjust(left=0.05,
right=0.95,
top=0.80,
bottom=0.05,
wspace=0.2,
hspace=0.4)
self.current_plot = self.current_figure.add_subplot(1, 1, 1)
# Plot figure
self.map = Basemap(projection='nsper',
lon_0=0,
lat_0=0,
resolution='c')
#self.map.drawmapboundary(fill_color='#7777ff')
#self.map.fillcontinents(color='#ddaa66',lake_color='#7777ff')
self.map.drawlsmask(land_color = "#ddaa66",
ocean_color="#7777ff")
#self.map.drawcoastlines()
#self.map.drawcountries()
#self.map.drawrivers()
#self.map.drawstates()
self.map.drawparallels(np.arange( -90.0, 90.0, 20.0))
self.map.drawmeridians(np.arange(-180.0, 181.0, 20.0))
# Display day and night shading
#self.date = datetime.datetime.utcnow()
#self.map_nightshade = self.map.nightshade(self.date)
# Write the output to a graphic file
self.current_figure.savefig("bcbm1_plot3")
mpl.pyplot.close(self.current_figure)
return(0)
#------------------------------------------------------------------------------
####################################################
def main(argv=None):  # When run as a script
    """Entry point: build a Bcbm1 instance and run it.

    argv -- command-line argument list; defaults to sys.argv[1:] when None.
    Returns the exit status from Bcbm1.bcbm1() so the process exit code
    reflects success or failure.
    """
    # BUG FIX: previously, when argv was passed explicitly the local
    # bcbm1_cmd_line was never assigned (NameError), and the return code
    # was discarded so sys.exit(main()) always exited with status None.
    if argv is None:
        argv = sys.argv[1:]
    bcbm1 = Bcbm1()
    return bcbm1.bcbm1(argv)
if __name__ == '__main__':
    sys.exit(main())
| mit |
farodin91/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
# Column indices of the interval timestamps/energy readings in a
# heartbeat log row (consumed by read_heartbeat_log via np.loadtxt).
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
# Profiler whose heartbeat log provides the instantaneous power curve.
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
# Per-trial summary file name and the line indices of the totals in it
# (consumed by find_best_executions).
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
    """Attach an (empty) text label centered above each bar of *rects* on *ax*."""
    for bar in rects:
        center_x = bar.get_x() + bar.get_width() / 2.
        top_y = 1.05 * bar.get_height()
        ax.text(center_x, top_y, '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
    """Plot the raw totals for a configuration.

    Writes a dual-axis bar chart (time on the left axis, energy on the
    right) to "<output_dir>/<config>.png".

    Keyword arguments:
    config -- configuration name
    plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
    max_time, max_time_std, max_energy, max_energy_std -- single values
    normalize -- True/False
    """
    plot_data = sorted(plot_data)
    keys = [p for (p, tt, tts, te, tes) in plot_data]
    total_times = [tt for (p, tt, tts, te, tes) in plot_data]
    total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
    total_energies = [te for (p, tt, tts, te, tes) in plot_data]
    total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
    fig, ax1 = plt.subplots()
    ind = np.arange(len(keys))  # the x locations for the groups
    width = 0.35  # the width of the bars
    # add some text for labels, title and axes ticks
    ax1.set_title('Time/Energy Data for Configuration ' + config)
    ax1.set_xticks(ind + width)
    ax1.set_xticklabels(keys, rotation=45)
    fig.set_tight_layout(True)
    fig.set_size_inches(len(plot_data) / 1.5, 8)
    ax2 = ax1.twinx()
    # Normalize
    if normalize:
        # NOTE(review): "/=" on a Python list with a numpy scalar rebinds
        # the name to a numpy array (numpy's __rtruediv__ handles it).
        total_times_std /= np.sum(total_times)
        total_times /= np.sum(total_times)
        total_energies_std /= np.sum(total_energies)
        total_energies /= np.sum(total_energies)
        ax1.set_ylabel('Time (Normalized)')
        ax2.set_ylabel('Energy (Normalized)')
    else:
        # set time in us instead of ns
        total_times_std /= np.array(1000000.0)
        total_times /= np.array(1000000.0)
        total_energies_std /= np.array(1000000.0)
        total_energies /= np.array(1000000.0)
        ax1.set_ylabel('Time (ms)')
        ax2.set_ylabel('Energy (Joules)')
    rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
    rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
    ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
    # set axis
    x1, x2, y1, y2 = plt.axis()
    if normalize:
        ax1.set_ylim(ymin=0, ymax=1)
        ax2.set_ylim(ymin=0, ymax=1)
    else:
        # Caller supplies global maxima so all configs share axis limits.
        ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
        ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
    autolabel(rects1, ax1)
    autolabel(rects2, ax2)
    # plt.show()
    plt.savefig(path.join(output_dir, config + ".png"))
    plt.close(fig)
def create_raw_total_data(config_data):
    """Get the raw data to plot for a configuration.

    Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]

    Keyword arguments:
    config_data -- (trial, trial_data)
    """
    # We can't assume that the same number of heartbeats are always issued
    # across trials, so first collect the per-trial sums per profiler.
    times_by_profiler = {}     # profiler name -> list of per-trial time sums
    energies_by_profiler = {}  # profiler name -> list of per-trial energy sums
    for _, trial_data in config_data:
        for profiler, ts, te, es, ee in trial_data:
            times_by_profiler.setdefault(profiler, []).append(np.sum(te - ts))
            energies_by_profiler.setdefault(profiler, []).append(np.sum(ee - es))
    # Reduce each profiler's per-trial sums to mean and stddev.
    return [(profiler,
             np.mean(times_by_profiler[profiler]),
             np.std(times_by_profiler[profiler]),
             np.mean(energies_by_profiler[profiler]),
             np.std(energies_by_profiler[profiler]))
            for profiler in times_by_profiler.keys()]
def plot_all_raw_totals(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Creates two output subdirectories (normalized and raw units) and
    renders one chart per configuration into each.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
    os.makedirs(raw_total_norm_out_dir)
    raw_total_out_dir = path.join(output_dir, 'raw_totals')
    os.makedirs(raw_total_out_dir)
    # (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
    raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
    mean_times = []
    mean_times_std = []
    mean_energies = []
    mean_energies_std = []
    for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
        for (p, tt, tts, te, tes) in profiler_tup:
            mean_times.append(tt)
            mean_times_std.append(tts)
            mean_energies.append(te)
            mean_energies_std.append(tes)
    # get consistent max time/energy values across plots
    max_t = np.max(mean_times)
    max_t_std = np.max(mean_times_std)
    max_e = np.max(mean_energies)
    max_e_std = np.max(mean_energies_std)
    # Render every configuration twice: normalized and in raw units.
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
     for data in raw_totals_data]
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
     for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
    """Plot time series for a single trial.

    Draws one broken-bar row per profiler plus the instantaneous power
    curve, and saves the figure to "ts_<config>_<trial>.png".

    Keyword arguments:
    config -- the config name
    trial -- the trial name
    trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
    max_end_time -- single value to use as max X axis value (for consistency across trials)
    output_dir -- the output directory
    """
    # TODO: Some profilers may have parallel tasks - need to identify this on plots
    max_end_time = max_end_time / 1000000.0  # ns -> ms
    trial_data = sorted(trial_data)
    fig, ax1 = plt.subplots()
    keys = [p for (p, ts, te, es, ee) in trial_data]
    # add some text for labels, title and axes ticks
    ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
    ax1.set_xlabel('Time (ms)')
    ax1.grid(True)
    width = 8  # the width of the bars
    ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
    ax1.set_yticklabels(keys)
    ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
    ax1.set_xlim(xmin=0, xmax=max_end_time)
    fig.set_tight_layout(True)
    fig.set_size_inches(16, len(trial_data) / 3)
    i = 10
    for (p, ts, te, es, ee) in trial_data:
        # PORTABILITY FIX: use range() instead of Python-2-only xrange();
        # identical iteration on both interpreters.
        xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0)
                   for j in range(len(ts))]
        ax1.broken_barh(xranges, (i - 0.5 * width, width))
        i += 10
    # place a vbar at the final time for this trial
    # PORTABILITY FIX: build an explicit list; Python 2's map()/filter()
    # returned lists, but Python 3 returns iterators which np.max cannot
    # reduce. The comprehension yields the same values on both.
    last_profiler_times = [np.nanmax(te) for (p, ts, te, es, ee) in trial_data
                           if len(te) > 0]
    plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
    power_times = []
    power_values = []
    for (p, ts, te, es, ee) in trial_data:
        # Only the energy profiler provides reliable instantaneous power.
        if p == ENERGY_PROFILER_NAME:
            power_times = te / 1000000.0
            power_values = (ee - es) / ((te - ts) / 1000.0)
    ax2 = ax1.twinx()
    ax2.set_xlim(xmin=0, xmax=max_end_time)
    ax2.set_ylim(ymin=0, ymax=max_power)
    ax2.set_ylabel('Power (Watts)')
    ax2.plot(power_times, power_values, color='r')
    # plt.show()
    plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
    plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
    """Compute average power from start/end energies and timestamps.

    Return: power values (energy delta over elapsed seconds)
    """
    energy_delta = ee - es
    elapsed_seconds = (te - ts) / 1000.0
    return energy_delta / elapsed_seconds
def plot_all_time_series(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Computes global max end-time and max power across every trial so the
    per-trial time-series plots share consistent axes, then renders one
    plot per trial into "<output_dir>/time_series".

    NOTE: relies on Python 2 semantics of map()/filter() returning lists
    (np.nanmax over the result); under Python 3 these are iterators.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    time_series_out_dir = path.join(output_dir, 'time_series')
    os.makedirs(time_series_out_dir)
    max_end_times = []
    max_power_values = []
    for (c, cd) in config_list:
        for (t, td) in cd:
            trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
            max_end_times.append(np.nanmax(trial_max_end_times))
            for (p, ts, te, es, ee) in td:
                # We only care about the energy profiler (others aren't reliable for instant power anyway)
                if p == ENERGY_PROFILER_NAME and len(te) > 0:
                    max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
    max_time = np.nanmax(max_end_times)
    max_power = np.nanmax(np.array(max_power_values)) * 1.2  # leave a little space at the top
    for (config, config_data) in config_list:
        [plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
         for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
    """Read a heartbeat log file.

    Return: (profiler name, [start times], [end times], [start energies], [end energies])

    Keyword arguments:
    profiler_hb_log -- the file to read
    """
    with warnings.catch_warnings():
        try:
            # Suppress numpy's warnings for short/empty files.
            warnings.simplefilter("ignore")
            time_start, time_end, energy_start, energy_end = \
                np.loadtxt(profiler_hb_log,
                           dtype=np.dtype('uint64'),
                           skiprows=1,
                           usecols=(HB_LOG_IDX_START_TIME,
                                    HB_LOG_IDX_END_TIME,
                                    HB_LOG_IDX_START_ENERGY,
                                    HB_LOG_IDX_END_ENERGY),
                           unpack=True,
                           ndmin=1)
        except ValueError:
            # No (or malformed) data rows - treat the log as empty.
            time_start, time_end, energy_start, energy_end = [], [], [], []
    # File names look like "<prefix>-<profiler>.<ext>"; extract the profiler.
    name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
    return (name,
            np.atleast_1d(time_start),
            np.atleast_1d(time_end),
            np.atleast_1d(energy_start),
            np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
    """Process trial directory.

    Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
    Time and energy are normalized to 0 start values.

    NOTE: relies on Python 2 map()/filter() returning lists (log_data is
    iterated multiple times below).

    Keyword arguments:
    trial_dir -- the directory for this trial
    """
    log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
                   filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
    # Find the earliest timestamps and energy readings
    min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
    min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
    # Normalize timing/energy data to start values of 0
    return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
    """Process a configuration directory.

    Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]

    Keyword arguments:
    config_dir -- the directory for this configuration - contains subdirectories for each trial
    """
    results = []
    for trial_dir in os.listdir(config_dir):
        trial_logs = process_trial_dir(path.join(config_dir, trial_dir))
        results.append((trial_dir, trial_logs))
    return results
def process_logs(log_dir):
    """Process log directory.

    Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    results = []
    for config_dir in os.listdir(log_dir):
        # Directory names look like "<prefix>_<config>"; keep the config part.
        config_name = config_dir.split('_')[1]
        results.append((config_name, process_config_dir(path.join(log_dir, config_dir))))
    return results
def find_best_executions(log_dir):
    """Get the best time, energy, and power from the characterization summaries.

    Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))

    Keyword arguments:
    log_dir -- the log directory containing per-config/per-trial summary files
    """
    DEFAULT = ('', '', 1000000000.0)
    min_time = DEFAULT
    min_energy = DEFAULT
    min_power = DEFAULT
    for config_dir in os.listdir(log_dir):
        for trial_dir in os.listdir(path.join(log_dir, config_dir)):
            with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
                lines = s.readlines()
                # Summary lines look like "<label>: <value>".
                time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
                energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
                power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
                if time < min_time[2]:
                    min_time = (config_dir, trial_dir, time)
                if energy < min_energy[2]:
                    min_energy = (config_dir, trial_dir, energy)
                # BUG FIX: compare against the stored minimum VALUE, not the
                # tuple. "power < min_power" compared float-to-tuple, which
                # is always True on Python 2 (and a TypeError on Python 3),
                # so min_power was overwritten by every trial.
                if power < min_power[2]:
                    min_power = (config_dir, trial_dir, power)
    return (min_time, min_energy, min_power)
def main():
    """This script processes the log files from the "characterize.py" script and produces visualizations.
    """
    # NOTE: this script uses Python 2 print statements throughout.
    # Default log directory
    directory = 'heartbeat_logs'
    # Default output directory
    output_dir = 'plots'
    # Default android
    android = False
    # Parsing the input of the script
    parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
    parser.add_argument("-d", "--directory",
                        default=directory,
                        help="Heartbeat log directory \"-d heartbeat_logs\"")
    parser.add_argument("-o", "--output",
                        default=output_dir,
                        help="Specify the log output directory, for example \"-o plots\"")
    parser.add_argument("--android",
                        action="store_true",
                        dest="android",
                        default=False,
                        help="Specify if processing results from Android")
    args = parser.parse_args()
    if args.directory:
        directory = args.directory
    if args.output:
        output_dir = args.output
    if args.android:
        android = args.android
    # Fail fast on bad directories before doing any work.
    if not os.path.exists(directory):
        print "Input directory does not exist: " + directory
        sys.exit(1)
    if os.path.exists(output_dir):
        print "Output directory already exists: " + output_dir
        sys.exit(1)
    res = process_logs(directory)
    if not android:
        # Android runs have no characterization summaries to rank.
        best = find_best_executions(directory)
        print 'Best time:', best[0]
        print 'Best energy:', best[1]
        print 'Best power:', best[2]
    os.makedirs(output_dir)
    plot_all_raw_totals(res, output_dir)
    plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
prisae/empymod | tests/test_utils.py | 1 | 45461 | import pytest
import numpy as np
from numpy.testing import assert_allclose
# Optional import
try:
import scooby
except ImportError:
scooby = False
from empymod import utils, filters
def test_emarray():
    """Check EMArray amplitude/phase for scalar, complex, and array input."""
    out = utils.EMArray(3)
    assert out.amp() == 3
    assert out.pha() == 0
    assert out.real == 3
    assert out.imag == 0
    out = utils.EMArray(1+1j)
    assert out.amp() == np.sqrt(2)
    assert_allclose(out.pha(), np.pi/4)
    assert out.real == 1
    assert out.imag == 1
    out = utils.EMArray([1+1j, 0+1j, -1-1j])
    assert_allclose(out.amp(), [np.sqrt(2), 1, np.sqrt(2)])
    # Phase variants: radians/degrees, with/without unwrapping, lag/lead.
    assert_allclose(out.pha(unwrap=False), [np.pi/4, np.pi/2, -3*np.pi/4])
    assert_allclose(out.pha(deg=True, unwrap=False), [45., 90., -135.])
    assert_allclose(out.pha(deg=True, unwrap=False, lag=False),
                    [-45., -90., 135.])
    assert_allclose(out.pha(deg=True, lag=False), [-45., -90., -225.])
    assert_allclose(out.real, [1, 0, -1])
    assert_allclose(out.imag, [1, 1, -1])
def test_check_ab(capsys):
    """Verify check_ab's src/rec mapping, verbose output, and error paths."""
    # This is another way how check_ab could have been done: hard-coded.
    # We use it here to check the output of check_ab.
    iab = [11, 12, 13, 14, 15, 16, 21, 22, 23, 24, 25, 26,
           31, 32, 33, 34, 35, 36, 41, 42, 43, 44, 45, 46,
           51, 52, 53, 54, 55, 56, 61, 62, 63, 64, 65, 66]
    oab = [11, 12, 13, 14, 15, 16, 21, 22, 23, 24, 25, 26,
           31, 32, 33, 34, 35, 36, 14, 24, 34, 11, 12, 13,
           15, 25, 35, 21, 22, 23, 16, 26, 36, 31, 32, 33]
    omsrc = np.array([[False, ]*3 + [True, ]*3]*6).ravel()
    omrec = [False, ]*18 + [True, ]*18
    for i, val in enumerate(iab):
        ab, msrc, mrec = utils.check_ab(val, 0)
        assert ab == oab[i]
        assert msrc == omsrc[i]
        assert mrec == omrec[i]
    # ab=36 is the zero-response case; check its verbose message.
    utils.check_ab(36, 3)
    out, _ = capsys.readouterr()
    outstr = " Input ab : 36\n\n> <ab> IS 36 WHICH IS ZERO; "
    assert out == outstr + "returning\n"
    utils.check_ab(44, 3)
    out, _ = capsys.readouterr()
    outstr = " Input ab : 44\n Calculated ab : 11\n"
    assert out == outstr
    # Check it raises a ValueError if a non-existing ab is provided.
    with pytest.raises(ValueError, match='<ab> must be one of: '):
        utils.check_ab(77, 0)
    # We just check one other thing here, that it fails with a TypeError if a
    # list instead of one value is provided. Generally the try/except statement
    # with int() should take proper care of all the checking right in check_ab.
    with pytest.raises(TypeError, match='<ab> must be an integer'):
        utils.check_ab([12, ], 0)
def test_check_bipole():
    """Test check_bipole for dipole and bipole inputs."""
    # Wrong size
    with pytest.raises(ValueError, match='Parameter tvar has wrong length!'):
        utils.check_bipole([0, 0, 0], 'tvar')
    # # Dipole stuff
    # Normal case
    ipole = [[0, 0, 0], [10, 20, 30], [100, 0, 100], 0, 32]
    inp_type = type(ipole[0])
    pole, nout, outz, isdipole = utils.check_bipole(ipole, 'tvar')
    out_type = type(ipole[0])
    assert inp_type == out_type  # Check input wasn't altered.
    assert_allclose(pole[0], np.array([0, 0, 0]))
    assert_allclose(pole[1], np.array([10, 20, 30]))
    assert_allclose(pole[2], np.array([100, 0, 100]))
    assert nout == 3
    assert outz == 3
    assert_allclose(isdipole, True)
    # Multiple azimuth
    pole = [[0, 0, 0], [10, 20, 30], [100, 0, 100], [0, 1, 2], 1]
    # BUG FIX: check_bipole was never called here, so the assertions below
    # (which expect the scalar dip broadcast to an array) only vacuously
    # re-checked the raw input values.
    pole, nout, outz, isdipole = utils.check_bipole(pole, 'tvar')
    assert_allclose(pole[0], np.array([0, 0, 0]))
    assert_allclose(pole[1], np.array([10, 20, 30]))
    assert_allclose(pole[2], np.array([100, 0, 100]))
    assert_allclose(pole[3], np.array([0, 1, 2]))
    assert_allclose(pole[4], np.array([1, 1, 1]))
    # Multiple dip
    pole = [[0, 0, 0], [10, 20, 30], [100, 0, 100], 1, [0, 1, 2]]
    # BUG FIX: same as above - the call was missing.
    pole, nout, outz, isdipole = utils.check_bipole(pole, 'tvar')
    assert_allclose(pole[0], np.array([0, 0, 0]))
    assert_allclose(pole[1], np.array([10, 20, 30]))
    assert_allclose(pole[2], np.array([100, 0, 100]))
    assert_allclose(pole[3], np.array([1, 1, 1]))
    assert_allclose(pole[4], np.array([0, 1, 2]))
    # x.size != y.size
    pole = [[0, 0], [10, 20, 30], [100, 0, 100], 0, 0]
    with pytest.raises(ValueError, match='Parameter tvar-y has wrong shape'):
        utils.check_bipole(pole, 'tvar')
    # # Bipole stuff
    # Dipole instead bipole
    pole = [0, 0, 1000, 1000, 10, 10]
    with pytest.raises(ValueError, match='At least one of <tvar> is a point'):
        utils.check_bipole(pole, 'tvar')
    # Normal case
    ipole = [0, 0, 1000, 1000, 10, 20]
    inp_type = type(ipole[0])
    pole, nout, outz, isdipole = utils.check_bipole(ipole, 'tvar')
    out_type = type(ipole[0])
    assert inp_type == out_type  # Check input wasn't altered.
    assert_allclose(pole[0], 0)
    assert_allclose(pole[1], 0)
    assert_allclose(pole[2], 1000)
    assert_allclose(pole[3], 1000)
    assert_allclose(pole[4], 10)
    assert_allclose(pole[5], 20)
    assert nout == 1
    assert outz == 1
    assert_allclose(isdipole, False)
    # Pole one has variable depths
    pole = [[0, 0], [10, 10], [0, 0], [20, 30], [10, 20], 0]
    pole, nout, outz, _ = utils.check_bipole(pole, 'tvar')
    assert_allclose(pole[4], [10, 20])
    assert_allclose(pole[5], [0, 0])
    assert nout == 2
    assert outz == 2
    # Pole one has variable depths
    pole = [[0, 0], [10, 10], [0, 0], [20, 30], 10, [20, 0]]
    pole, nout, outz, _ = utils.check_bipole(pole, 'tvar')
    assert_allclose(pole[4], [10, 10])
    assert_allclose(pole[5], [20, 0])
    assert nout == 2
    assert outz == 2
def test_check_dipole(capsys):
    """Verify check_dipole parsing, its verbose output, and its error paths."""
    # correct input, verb > 2, src
    isrc = [[1000, 2000], [0, 0], 0]
    inp_type = type(isrc[0])
    src, nsrc = utils.check_dipole(isrc, 'src', 3)
    out, _ = capsys.readouterr()
    out_type = type(isrc[0])
    assert inp_type == out_type  # Check input wasn't altered.
    assert nsrc == 2
    assert_allclose(src[0], [1000, 2000])
    assert_allclose(src[1], [0, 0])
    assert_allclose(src[2], 0)
    outstr = " Source(s) : 2 dipole(s)\n"
    outstr += " > x [m] : 1000 2000\n"
    outstr += " > y [m] : 0 0\n"
    outstr += " > z [m] : 0\n"
    assert out == outstr
    # Check print if more than 3 dipoles
    utils.check_dipole([[1, 2, 3, 4], [0, 0, 0, 0], 0], 'src', 4)
    out, _ = capsys.readouterr()
    outstr = " Source(s) : 4 dipole(s)\n"
    outstr += " > x [m] : 1 - 4 : 4 [min-max; #]\n"
    outstr += " : 1 2 3 4\n"
    outstr += " > y [m] : 0 - 0 : 4 [min-max; #]\n"
    outstr += " : 0 0 0 0\n"
    outstr += " > z [m] : 0\n"
    assert out == outstr
    # correct input, verb > 2, rec
    rec, nrec = utils.check_dipole([0, 0, 0], 'rec', 3)
    out, _ = capsys.readouterr()
    assert nrec == 1
    assert_allclose(rec[0], 0)
    assert_allclose(rec[1], 0)
    assert_allclose(rec[2], 0)
    outstr = " Receiver(s) : 1 dipole(s)\n"
    outstr += " > x [m] : 0\n"
    outstr += " > y [m] : 0\n"
    outstr += " > z [m] : 0\n"
    assert out == outstr
    # Check Errors: more than one z
    with pytest.raises(ValueError, match='Parameter src has wrong shape'):
        utils.check_dipole([[0, 0], [0, 0], [0, 0]], 'src', 3)
    # Check Errors: wrong number of elements
    with pytest.raises(ValueError, match='Parameter rec has wrong shape'):
        utils.check_dipole([0, 0, 0, 0], 'rec', 3)
def test_check_frequency(capsys):
    """Verify check_frequency's eta/zeta computation and its warnings."""
    # Expected outputs: frequencies (clipped to a minimum) and the
    # horizontal/vertical eta and zeta arrays.
    rfreq = np.array([1e-20, 1, 1e06])
    retaH = np.array([[0.05 + 5.56325028e-30j, 50 + 2.78162514e-30j],
                      [0.05 + 5.56325028e-10j, 50 + 2.78162514e-10j],
                      [0.05 + 5.56325028e-04j, 50 + 2.78162514e-04j]])
    retaV = np.array([[0.05 + 1.11265006e-29j, 5.55555556 + 2.78162514e-29j],
                      [0.05 + 1.11265006e-09j, 5.55555556 + 2.78162514e-09j],
                      [0.05 + 1.11265006e-03j, 5.55555556 + 2.78162514e-03j]])
    rzetaH = np.array([[0 + 7.89568352e-26j, 0 + 7.89568352e-26j],
                       [0 + 7.89568352e-06j, 0 + 7.89568352e-06j],
                       [0 + 7.89568352e+00j, 0 + 7.89568352e+00j]])
    rzetaV = np.array([[0 + 7.89568352e-25j, 0 + 3.94784176e-25j],
                       [0 + 7.89568352e-05j, 0 + 3.94784176e-05j],
                       [0 + 7.89568352e+01j, 0 + 3.94784176e+01j]])
    # Frequency-domain input (positive values); f=0 triggers the warning.
    output = utils.check_frequency(np.array([0, 1, 1e6]), np.array([20, .02]),
                                   np.array([1, 3]), np.array([10, 5]),
                                   np.array([20, 50]), np.array([1, 1]),
                                   np.array([10, 5]), 3)
    out, _ = capsys.readouterr()
    assert " frequency [Hz] : " in out
    assert "* WARNING :: Frequencies < " in out
    freq, etaH, etaV, zetaH, zetaV = output
    assert_allclose(freq, rfreq)
    assert_allclose(etaH, retaH)
    assert_allclose(etaV, retaV)
    assert_allclose(zetaH, rzetaH)
    assert_allclose(zetaV, rzetaV)
    # Negative input means Laplace-domain s-values; tiny value warns.
    output = utils.check_frequency(-np.array([1e-40, 1, 1e6]),
                                   np.array([20, .02]),
                                   np.array([1, 3]), np.array([10, 5]),
                                   np.array([20, 50]), np.array([1, 1]),
                                   np.array([10, 5]), 3)
    out, _ = capsys.readouterr()
    assert " s-value [Hz] : " in out
    assert "* WARNING :: Laplace val < " in out
    freq, etaH, etaV, zetaH, zetaV = output
    assert_allclose(freq, rfreq)
def test_check_hankel(capsys):
    """Verify check_hankel defaults, arguments, and errors for the three
    Hankel-transform methods: 'dlf', 'qwe', and 'quad'."""
    # # DLF # #
    # verbose
    ht, htarg = utils.check_hankel('dlf', {}, 4)
    out, _ = capsys.readouterr()
    assert " Hankel : DLF (Fast Hankel Transform)\n > F" in out
    assert " > DLF type : Standard" in out
    assert ht == 'dlf'
    assert htarg['dlf'].name == filters.key_201_2009().name
    assert htarg['pts_per_dec'] == 0
    # provide filter-string and unknown parameter
    _, htarg = utils.check_hankel('dlf', {'dlf': 'key_201_2009', 'abc': 0}, 1)
    out, _ = capsys.readouterr()
    assert htarg['dlf'].name == filters.key_201_2009().name
    assert htarg['pts_per_dec'] == 0
    assert "WARNING :: Unknown htarg {'abc': 0} for method 'dlf'" in out
    # provide filter-instance
    _, htarg = utils.check_hankel('dlf', {'dlf': filters.kong_61_2007()}, 0)
    assert htarg['dlf'].name == filters.kong_61_2007().name
    assert htarg['pts_per_dec'] == 0
    # provide pts_per_dec
    # (pts_per_dec=-1 selects the lagged-convolution DLF variant)
    _, htarg = utils.check_hankel('dlf', {'pts_per_dec': -1}, 3)
    out, _ = capsys.readouterr()
    assert " > DLF type : Lagged Convolution" in out
    assert htarg['dlf'].name == filters.key_201_2009().name
    assert htarg['pts_per_dec'] == -1
    # provide filter-string and pts_per_dec
    # (pts_per_dec>0 selects the splined DLF variant)
    _, htarg = utils.check_hankel(
            'dlf', {'dlf': 'key_201_2009', 'pts_per_dec': 20}, 4)
    out, _ = capsys.readouterr()
    assert " > DLF type : Splined, 20.0 pts/dec" in out
    assert htarg['dlf'].name == filters.key_201_2009().name
    assert htarg['pts_per_dec'] == 20
    # Assert it can be called repetitively
    # (feeding the returned htarg back in must print nothing new)
    _, _ = capsys.readouterr()
    ht, htarg = utils.check_hankel('dlf', {}, 1)
    _, _ = utils.check_hankel(ht, htarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # QWE # #
    # verbose
    ht, htarg = utils.check_hankel('qwe', {}, 4)
    out, _ = capsys.readouterr()
    outstr = " Hankel : Quadrature-with-Extrapolation\n > rtol"
    assert outstr in out
    assert ht == 'qwe'
    assert htarg['rtol'] == 1e-12
    assert htarg['atol'] == 1e-30
    assert htarg['nquad'] == 51
    assert htarg['maxint'] == 100
    assert htarg['pts_per_dec'] == 0
    assert htarg['diff_quad'] == 100
    assert htarg['a'] is None
    assert htarg['b'] is None
    assert htarg['limit'] is None
    # limit
    _, htarg = utils.check_hankel('qwe', {'limit': 30}, 0)
    assert htarg['rtol'] == 1e-12
    assert htarg['atol'] == 1e-30
    assert htarg['nquad'] == 51
    assert htarg['maxint'] == 100
    assert htarg['pts_per_dec'] == 0
    assert htarg['diff_quad'] == 100
    assert htarg['a'] is None
    assert htarg['b'] is None
    assert htarg['limit'] == 30
    # all arguments
    _, htarg = utils.check_hankel(
            'qwe', {'rtol': 1e-3, 'atol': 1e-4, 'nquad': 31, 'maxint': 20,
                    'pts_per_dec': 30, 'diff_quad': 200, 'a': 1e-6, 'b': 160,
                    'limit': 30},
            3)
    out, _ = capsys.readouterr()
    assert " > a (quad): 1e-06" in out
    assert " > b (quad): 160" in out
    assert " > limit (quad): 30" in out
    assert htarg['rtol'] == 1e-3
    assert htarg['atol'] == 1e-4
    assert htarg['nquad'] == 31
    assert htarg['maxint'] == 20
    assert htarg['pts_per_dec'] == 30
    assert htarg['diff_quad'] == 200
    assert htarg['a'] == 1e-6
    assert htarg['b'] == 160
    assert htarg['limit'] == 30
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    ht, htarg = utils.check_hankel('qwe', {}, 1)
    _, _ = utils.check_hankel(ht, htarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # QUAD # #
    # verbose
    ht, htarg = utils.check_hankel('quad', {}, 4)
    out, _ = capsys.readouterr()
    outstr = " Hankel : Quadrature\n > rtol"
    assert outstr in out
    assert ht == 'quad'
    assert htarg['rtol'] == 1e-12
    assert htarg['atol'] == 1e-20
    assert htarg['limit'] == 500
    assert htarg['a'] == 1e-6
    assert htarg['b'] == 0.1
    assert htarg['pts_per_dec'] == 40
    # pts_per_dec
    _, htarg = utils.check_hankel('quad', {'pts_per_dec': 100}, 0)
    assert htarg['rtol'] == 1e-12
    assert htarg['atol'] == 1e-20
    assert htarg['limit'] == 500
    assert htarg['a'] == 1e-6
    assert htarg['b'] == 0.1
    assert htarg['pts_per_dec'] == 100
    # all arguments
    _, htarg = utils.check_hankel(
            'quad', {'rtol': 1e-3, 'atol': 1e-4, 'limit': 100, 'a': 1e-10,
                     'b': 200, 'pts_per_dec': 50},
            0)
    assert htarg['rtol'] == 1e-3
    assert htarg['atol'] == 1e-4
    assert htarg['limit'] == 100
    assert htarg['a'] == 1e-10
    assert htarg['b'] == 200
    assert htarg['pts_per_dec'] == 50
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    ht, htarg = utils.check_hankel('quad', {}, 1)
    _, _ = utils.check_hankel(ht, htarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # wrong ht
    with pytest.raises(ValueError, match='must be one of: '):
        utils.check_hankel('doesnotexist', {}, 1)
    # filter missing attributes
    # (a Fourier Cosine/Sine filter lacks the j0/j1 Hankel attributes)
    with pytest.raises(AttributeError, match='DLF-filter is missing some'):
        utils.check_hankel('dlf', {'dlf': 'key_101_CosSin_2012'}, 1)
def test_check_model(capsys):
    """Verify check_model defaults, xdirect variants, fullspace detection,
    and its error conditions.

    FIX: np.infty was an alias removed in NumPy 2.0; use the canonical
    np.inf (identical value) so this test runs on all NumPy versions.
    """
    # Normal case; xdirect=True (default)
    res = utils.check_model(0, [1e20, 20], [1, 0], [0, 1], [50, 80], [10, 1],
                            [1, 1], True, 3)
    depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = res
    out, _ = capsys.readouterr()
    assert "* WARNING :: Parameter aniso < " in out
    assert " direct field : Comp. in frequency domain" in out
    assert_allclose(depth, [-np.inf, 0])
    assert_allclose(res, [1e20, 20])
    # aniso=0 is clipped to the minimum value, hence the sqrt term.
    assert_allclose(aniso, [1, np.sqrt(1e-20/20)])
    assert_allclose(epermH, [0, 1])
    assert_allclose(epermV, [50, 80])
    assert_allclose(mpermH, [10, 1])
    assert_allclose(mpermV, [1, 1])
    assert_allclose(isfullspace, False)
    # xdirect=False
    res = utils.check_model(0, [1e20, 20], [1, 2], [0, 1], [50, 80], [10, 1],
                            [1, 1], False, 3)
    out, _ = capsys.readouterr()
    assert " direct field : Comp. in wavenumber domain" in out
    # xdirect=None
    res = utils.check_model(0, [1e20, 20], [1, 2], [0, 1], [50, 80], [10, 1],
                            [1, 1], None, 3)
    out, _ = capsys.readouterr()
    assert " direct field : Not calculated (secondary field)" in out
    # Check -np.inf is added to depth
    out = utils.check_model([], 2, 1, 1, 1, 1, 1, True, 1)
    assert_allclose(out[0], -np.inf)
    # Check -np.inf is not added if it is already in depth
    out = utils.check_model(-np.inf, 2, 1, 1, 1, 1, 1, True, 1)
    assert_allclose(out[0], -np.inf)
    # Check verbosity and fullspace
    utils.check_model(0, [1, 1], [2, 2], [10, 10], [1, 1], None, [3, 3], True,
                      4)
    out, _ = capsys.readouterr()
    outstr1 = " depth [m] : 0\n res [Ohm.m] : 1 1\n aniso"
    outstr2 = "S A FULLSPACE; returning analytical frequency-domain solution\n"
    assert outstr1 in out
    assert outstr2 in out
    # Check fullspace if only one value, w\o xdirect
    utils.check_model([], 1, 2, 10, 1, 2, 3, True, 4)
    out, _ = capsys.readouterr()
    assert outstr2 in out
    utils.check_model([], 1, 2, 10, 1, 2, 3, False, 4)
    out, _ = capsys.readouterr()
    assert "MODEL IS A FULLSPACE\n" in out
    # Non-continuously in/de-creasing depth
    with pytest.raises(ValueError, match='Depth must be continuously incr'):
        var = [1, 1, 1, 1]
        utils.check_model([0, 100, 90], var, var, var, var, var, var, True, 1)
    # A ValueError check
    with pytest.raises(ValueError, match='Parameter res has wrong shape'):
        utils.check_model(
                0, 1, [2, 2], [10, 10], [1, 1], [2, 2], [3, 3], True, 1)
def test_check_all_depths():
    """Verify utils.check_model returns equivalent models for all four ways
    of specifying a layered model: LHS/RHS coordinate systems, each in
    low-to-high or high-to-low ordering.

    The RHS variants flip the depth sign; the reversed-ordering variants
    reverse every per-layer array, so the loop below compares each variant
    against the canonical LHS low-to-high result with the matching swap.
    """
    depth = np.array([-50, 0, 100, 2000])
    res = [6, 1, 2, 3, 4]
    aniso = [6, 7, 8, 9, 10]
    epermH = [1.0, 1.1, 1.2, 1.3, 1.4]
    epermV = [1.5, 1.6, 1.7, 1.8, 1.9]
    mpermH = [2.0, 2.1, 2.2, 2.3, 2.4]
    mpermV = [2.5, 2.6, 2.7, 2.8, 2.9]
    # 1. Ordering as internally used:
    # LHS low-to-high (+1, ::+1)
    lhs_l2h = utils.check_model(
            depth, res, aniso, epermH, epermV, mpermH, mpermV, True, 0)
    # RHS high-to-low (-1, ::+1)
    rhs_h2l = utils.check_model(
            -depth, res, aniso, epermH, epermV, mpermH, mpermV, True, 0)
    # 2. Reversed ordering:
    # LHS high-to-low (+1, ::-1)
    lhs_h2l = utils.check_model(
            depth[::-1], res[::-1], aniso[::-1], epermH[::-1], epermV[::-1],
            mpermH[::-1], mpermV[::-1], True, 0)
    # RHS low-to-high (-1, ::-1)
    rhs_l2h = utils.check_model(
            -depth[::-1], res[::-1], aniso[::-1], epermH[::-1], epermV[::-1],
            mpermH[::-1], mpermV[::-1], True, 0)
    for i, var in enumerate([lhs_h2l, rhs_h2l, rhs_l2h]):
        if i == 0:
            swap = 1   # LHS: same sign, same ordering after normalization
        else:
            swap = -1  # RHS: sign-flipped depth, reversed layer arrays
        # Skip the first depth entry (-infty added by check_model)
        assert_allclose(lhs_l2h[0][1:], swap*var[0][1:][::swap])
        assert_allclose(lhs_l2h[1], var[1][::swap])
        assert_allclose(lhs_l2h[2], var[2][::swap])
        assert_allclose(lhs_l2h[3], var[3][::swap])
        assert_allclose(lhs_l2h[4], var[4][::swap])
        assert_allclose(lhs_l2h[5], var[5][::swap])
        assert_allclose(lhs_l2h[6], var[6][::swap])
def test_check_time(capsys):
    """Exercise utils.check_time for all four Fourier methods (DLF, QWE,
    FFTLog, FFT): default parameters, overrides, verbosity output,
    repeated calls with a returned ftarg, and the raised errors.

    NOTE(review): assertions on printed output depend on capsys buffering;
    the readouterr() calls must stay in this exact order.
    """
    time = np.array([3])
    # # DLF # #
    # verbose
    _, f, ft, ftarg = utils.check_time(time, 0, 'dlf', {}, 4)
    out, _ = capsys.readouterr()
    assert " time [s] : 3" in out
    assert " Fourier : DLF (Sine-Filter)" in out
    assert "> DLF type : Lagged Convolution" in out
    assert ft == 'dlf'
    assert ftarg['dlf'].name == filters.key_201_CosSin_2012().name
    assert ftarg['pts_per_dec'] == -1
    # Expected first/last nine frequencies of the lagged-convolution output
    f1 = np.array([4.87534752e-08, 5.60237934e-08, 6.43782911e-08,
                   7.39786458e-08, 8.50106448e-08, 9.76877807e-08,
                   1.12255383e-07, 1.28995366e-07, 1.48231684e-07])
    f2 = np.array([2.88109455e+04, 3.31073518e+04, 3.80444558e+04,
                   4.37178011e+04, 5.02371788e+04, 5.77287529e+04,
                   6.63375012e+04, 7.62300213e+04, 8.75977547e+04])
    assert_allclose(f[:9], f1)
    assert_allclose(f[-9:], f2)
    assert_allclose(f.size, 201+3)
    assert ftarg['kind'] == 'sin'
    # filter-string and unknown parameter
    _, f, _, ftarg = utils.check_time(
            time, -1, 'dlf',
            {'dlf': 'key_201_CosSin_2012', 'kind': 'cos', 'notused': 1},
            4)
    out, _ = capsys.readouterr()
    outstr = " time [s] : 3\n"
    outstr += " Fourier : DLF (Cosine-Filter)\n > Filter"
    assert outstr in out
    assert "WARNING :: Unknown ftarg {'notused': 1} for method 'dlf'" in out
    assert ft == 'dlf'
    assert ftarg['dlf'].name == filters.key_201_CosSin_2012().name
    assert ftarg['pts_per_dec'] == -1
    assert_allclose(f[:9], f1)
    assert_allclose(f[-9:], f2)
    assert_allclose(f.size, 201+3)
    assert ftarg['kind'] == 'cos'
    # filter instance
    _, _, _, ftarg = utils.check_time(
            time, 1, 'dlf',
            {'dlf': filters.key_201_CosSin_2012(), 'kind': 'sin'}, 0)
    assert ftarg['dlf'].name == filters.key_201_CosSin_2012().name
    assert ftarg['pts_per_dec'] == -1
    assert ftarg['kind'] == 'sin'
    # pts_per_dec
    out, _ = capsys.readouterr()  # clear buffer
    _, _, _, ftarg = utils.check_time(time, 0, 'dlf', {'pts_per_dec': 30}, 4)
    assert ftarg['dlf'].name == filters.key_201_CosSin_2012().name
    assert ftarg['pts_per_dec'] == 30
    assert ftarg['kind'] == 'sin'
    out, _ = capsys.readouterr()
    assert " > DLF type : Splined, 30.0 pts/dec" in out
    # filter-string and pts_per_dec
    _, _, _, ftarg = utils.check_time(
            time, 0, 'dlf',
            {'dlf': 'key_81_CosSin_2009', 'pts_per_dec': -1, 'kind': 'cos'}, 4)
    out, _ = capsys.readouterr()
    assert " > DLF type : Lagged Convolution" in out
    assert ftarg['dlf'].name == filters.key_81_CosSin_2009().name
    assert ftarg['pts_per_dec'] == -1
    assert ftarg['kind'] == 'cos'
    # pts_per_dec=0 selects the standard DLF
    _, freq, _, ftarg = utils.check_time(
            time, 0, 'dlf', {'pts_per_dec': 0, 'kind': 'sin'}, 4)
    out, _ = capsys.readouterr()
    assert " > DLF type : Standard" in out
    assert ftarg['pts_per_dec'] == 0
    # Standard DLF: frequencies are filter base values scaled by the times
    f_base = filters.key_201_CosSin_2012().base
    assert_allclose(np.ravel(f_base/(2*np.pi*time[:, None])), freq)
    # filter-string and pts_per_dec
    _, _, _, ftarg = utils.check_time(
            time, 0, 'dlf',
            {'dlf': 'key_81_CosSin_2009', 'pts_per_dec': 50, 'kind': 'cos'}, 0)
    assert ftarg['dlf'].name == filters.key_81_CosSin_2009().name
    assert ftarg['pts_per_dec'] == 50
    assert ftarg['kind'] == 'cos'
    # just kind
    _, f, _, ftarg = utils.check_time(
            time, 0, 'dlf', {'kind': 'sin'}, 0)
    assert ftarg['pts_per_dec'] == -1
    assert_allclose(f[:9], f1)
    assert_allclose(f[-9:], f2)
    assert_allclose(f.size, 204)
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    _, _, _, ftarg = utils.check_time(time, 0, 'dlf', {}, 1)
    _, _, _, _ = utils.check_time(time, 0, 'dlf', ftarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # QWE # #
    # verbose
    _, f, ft, ftarg = utils.check_time(time, 0, 'qwe', {}, 4)
    out, _ = capsys.readouterr()
    outstr = " Fourier : Quadrature-with-Extrapolation\n > rtol"
    assert out[24:87] == outstr
    assert ft == 'qwe'
    assert ftarg['rtol'] == 1e-8
    assert ftarg['atol'] == 1e-20
    assert ftarg['nquad'] == 21
    assert ftarg['maxint'] == 200
    assert ftarg['pts_per_dec'] == 20
    assert ftarg['diff_quad'] == 100
    f1 = np.array([3.16227766e-03, 3.54813389e-03, 3.98107171e-03,
                   4.46683592e-03, 5.01187234e-03, 5.62341325e-03,
                   6.30957344e-03, 7.07945784e-03, 7.94328235e-03])
    f2 = np.array([1.00000000e+02, 1.12201845e+02, 1.25892541e+02,
                   1.41253754e+02, 1.58489319e+02, 1.77827941e+02,
                   1.99526231e+02, 2.23872114e+02, 2.51188643e+02])
    assert_allclose(f[:9], f1)
    assert_allclose(f[-9:], f2)
    assert_allclose(f.size, 99)
    assert ftarg['a'] is None
    assert ftarg['b'] is None
    assert ftarg['limit'] is None
    assert ftarg['sincos'] is np.sin
    # only limit
    _, _, _, ftarg = utils.check_time(time, 1, 'qwe', {'limit': 30}, 0)
    assert ftarg['rtol'] == 1e-8
    assert ftarg['atol'] == 1e-20
    assert ftarg['nquad'] == 21
    assert ftarg['maxint'] == 200
    assert ftarg['pts_per_dec'] == 20
    assert ftarg['diff_quad'] == 100
    assert ftarg['a'] is None
    assert ftarg['b'] is None
    assert ftarg['limit'] == 30
    assert ftarg['sincos'] is np.sin
    # all arguments
    _, _, _, ftarg = utils.check_time(
            time, -1, 'qwe',
            {'rtol': 1e-3, 'atol': 1e-4, 'nquad': 31, 'maxint': 20,
             'pts_per_dec': 30, 'diff_quad': 200, 'a': 0.01, 'b': 0.2,
             'limit': 100},
            3)
    out, _ = capsys.readouterr()
    assert " > a (quad): 0.01" in out
    assert " > b (quad): 0.2" in out
    assert " > limit (quad): 100" in out
    assert ftarg['rtol'] == 1e-3
    assert ftarg['atol'] == 1e-4
    assert ftarg['nquad'] == 31
    assert ftarg['maxint'] == 20
    assert ftarg['pts_per_dec'] == 30
    assert ftarg['diff_quad'] == 200
    assert ftarg['a'] == 0.01
    assert ftarg['b'] == 0.2
    assert ftarg['limit'] == 100
    # signal=-1 switches from sine to cosine transform
    assert ftarg['sincos'] is np.cos
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    _, _, _, ftarg = utils.check_time(time, 0, 'qwe', {}, 1)
    _, _, _, _ = utils.check_time(time, 0, 'qwe', ftarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # FFTLog # #
    # verbose
    _, f, ft, ftarg = utils.check_time(time, 0, 'fftlog', {}, 4)
    out, _ = capsys.readouterr()
    outstr = " Fourier : FFTLog\n > pts_per_dec"
    assert outstr in out
    assert ft == 'fftlog'
    assert ftarg['pts_per_dec'] == 10
    assert_allclose(ftarg['add_dec'], np.array([-2., 1.]))
    assert ftarg['q'] == 0
    tres = np.array([0.3571562, 0.44963302, 0.56605443, 0.71262031, 0.89713582,
                     1.12942708, 1.42186445, 1.79002129, 2.25350329,
                     2.83699255, 3.57156202, 4.49633019, 5.66054433,
                     7.1262031, 8.97135818, 11.29427079, 14.2186445,
                     17.90021288, 22.53503287, 28.36992554, 35.71562019,
                     44.96330186, 56.60544331, 71.26203102, 89.71358175,
                     112.94270785, 142.18644499, 179.00212881, 225.35032873,
                     283.69925539])
    assert ftarg['mu'] == 0.5
    assert_allclose(ftarg['tcalc'], tres)
    assert_allclose(ftarg['dlnr'], 0.23025850929940461)
    assert_allclose(ftarg['kr'], 1.0610526667295022)
    assert_allclose(ftarg['rk'], 0.016449035064149849)
    fres = np.array([0.00059525, 0.00074937, 0.00094341, 0.00118768, 0.0014952,
                     0.00188234, 0.00236973, 0.00298331, 0.00375577,
                     0.00472823, 0.00595249, 0.00749374, 0.00943407,
                     0.01187678, 0.01495199, 0.01882343, 0.0236973,
                     0.02983313, 0.03755769, 0.04728233, 0.05952493,
                     0.07493744, 0.09434065, 0.11876785, 0.14951986,
                     0.18823435, 0.23697301, 0.29833134, 0.3755769,
                     0.47282331])
    assert_allclose(f, fres, rtol=1e-5)
    # Several parameters
    _, _, _, ftarg = utils.check_time(
            time, -1, 'fftlog',
            {'pts_per_dec': 10, 'add_dec': [-3, 4], 'q': 2}, 0)
    assert ftarg['pts_per_dec'] == 10
    assert_allclose(ftarg['add_dec'], np.array([-3., 4.]))
    assert ftarg['q'] == 1  # q > 1 reset to 1...
    assert ftarg['mu'] == -0.5
    assert_allclose(ftarg['dlnr'], 0.23025850929940461)
    assert_allclose(ftarg['kr'], 0.94312869748639161)
    assert_allclose(ftarg['rk'], 1.8505737940600746)
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    _, _, _, ftarg = utils.check_time(time, 0, 'fftlog', {}, 1)
    _, _, _, _ = utils.check_time(time, 0, 'fftlog', ftarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # FFT # #
    # verbose
    _, f, ft, ftarg = utils.check_time(time, 0, 'fft', {}, 4)
    out, _ = capsys.readouterr()
    assert "Fourier : Fast Fourier Transform FFT\n > dfreq" in out
    assert " > pts_per_dec : (linear)" in out
    assert ft == 'fft'
    assert ftarg['dfreq'] == 0.002
    assert ftarg['nfreq'] == 2048
    assert ftarg['ntot'] == 2048
    assert ftarg['pts_per_dec'] is None
    fres = np.array([0.002, 0.004, 0.006, 0.008, 0.01, 4.088, 4.09, 4.092,
                     4.094, 4.096])
    assert_allclose(f[:5], fres[:5])
    assert_allclose(f[-5:], fres[-5:])
    # Several parameters; ntot=3 is rounded up to the next power of two
    _, _, _, ftarg = utils.check_time(
            time, 0, 'fft', {'dfreq': 1e-3, 'nfreq': 2**15+1, 'ntot': 3}, 0)
    assert ftarg['dfreq'] == 0.001
    assert ftarg['nfreq'] == 2**15+1
    assert ftarg['ntot'] == 2**16
    # Several parameters; pts_per_dec
    _, f, _, ftarg = utils.check_time(time, 0, 'fft', {'pts_per_dec': 5}, 3)
    out, _ = capsys.readouterr()
    assert " > pts_per_dec : 5" in out
    assert ftarg['dfreq'] == 0.002
    assert ftarg['nfreq'] == 2048
    assert ftarg['ntot'] == 2048
    assert ftarg['pts_per_dec'] == 5
    outf = np.array([2.00000000e-03, 3.22098066e-03, 5.18735822e-03,
                     8.35419026e-03, 1.34543426e-02, 2.16680888e-02,
                     3.48962474e-02, 5.62000691e-02, 9.05096680e-02,
                     1.45764945e-01, 2.34753035e-01, 3.78067493e-01,
                     6.08874043e-01, 9.80585759e-01, 1.57922389e+00,
                     2.54332480e+00, 4.09600000e+00])
    assert_allclose(f, outf)
    # Assert it can be called repetitively
    _, _ = capsys.readouterr()
    _, _, _, ftarg = utils.check_time(time, 0, 'fft', {}, 1)
    _, _, _, _ = utils.check_time(time, 0, 'fft', ftarg, 1)
    out, _ = capsys.readouterr()
    assert out == ""
    # # Various # #
    # minimum time
    _ = utils.check_time(
            0, 0, 'dlf', {'dlf': 'key_201_CosSin_2012', 'kind': 'cos'}, 1)
    out, _ = capsys.readouterr()
    assert out[:21] == "* WARNING :: Times < "
    # Signal != -1, 0, 1
    with pytest.raises(ValueError, match='<signal> must be one of:'):
        utils.check_time(time, -2, 'dlf', {}, 0)
    # ft != cos, sin, dlf, qwe, fftlog,
    with pytest.raises(ValueError, match='<ft> must be one of:'):
        utils.check_time(time, 0, 'bla', {}, 0)
    # filter missing attributes
    with pytest.raises(AttributeError, match='DLF-filter is missing some att'):
        utils.check_time(time, 0, 'dlf', {'dlf': 'key_201_2012'}, 1)
    # filter with wrong kind
    with pytest.raises(ValueError, match="'kind' must be either 'sin' or"):
        utils.check_time(time, 0, 'dlf', {'kind': 'wrongkind'}, 1)
def test_check_solution(capsys):
    """Check the three ValueError cases of utils.check_solution:
    unknown solution name, diffusive solution with unsupported ab/msrc/mrec,
    and full-fullspace solution in the wrong domain."""
    # wrong solution
    with pytest.raises(ValueError, match='Solution must be one of'):
        utils.check_solution('hs', 1, 13, False, False)
    # wrong ab/msrc/mrec
    with pytest.raises(ValueError, match='Diffusive solution is only imple'):
        utils.check_solution('dhs', None, 11, True, False)
    # wrong domain
    with pytest.raises(ValueError, match='Full fullspace solution is only'):
        utils.check_solution('fs', 1, 21, True, True)
def test_get_abs(capsys):
    """Check utils.get_abs: the set of required 'ab' source-receiver
    configurations for all combinations of magnetic/electric source and
    receiver with various source/receiver angles.

    ``res`` lists the expected ab-sets in the iteration order of the
    nested loops below (msrc, mrec, src-angles, rec-angles); the counter
    ``i`` is threaded through all four loops to index it.
    """
    # Check some cases
    #       general,    x/y-pl,    x/z-pl            x
    ang = [[np.pi/4, np.pi/4], [np.pi/6, 0], [0, np.pi/3], [0, 0]]
    # Results for EE, ME, EM, MM
    res = [[11, 12, 13, 21, 22, 23, 31, 32, 33], [11, 12, 13, 21, 22, 23],
           [11, 12, 13, 31, 32, 33], [11, 12, 13], [11, 12, 21, 22, 31, 32],
           [11, 12, 21, 22], [11, 12, 31, 32], [11, 12],
           [11, 13, 21, 23, 31, 33], [11, 13, 21, 23], [11, 13, 31, 33],
           [11, 13], [11, 21, 31], [11, 21], [11, 31], [11],
           [14, 24, 34, 15, 25, 35, 16, 26, 36], [14, 24, 34, 15, 25, 35],
           [14, 24, 34, 16, 26, 36], [14, 24, 34], [14, 24, 15, 25, 16, 26],
           [14, 24, 15, 25], [14, 24, 16, 26], [14, 24],
           [14, 34, 15, 35, 16, 36], [14, 34, 15, 35], [14, 34, 16, 36],
           [14, 34], [14, 15, 16], [14, 15], [14, 16], [14],
           [14, 15, 16, 24, 25, 26, 34, 35, 36], [14, 15, 16, 24, 25, 26],
           [14, 15, 16, 34, 35, 36], [14, 15, 16], [14, 15, 24, 25, 34, 35],
           [14, 15, 24, 25], [14, 15, 34, 35], [14, 15],
           [14, 16, 24, 26, 34, 36], [14, 16, 24, 26], [14, 16, 34, 36],
           [14, 16], [14, 24, 34], [14, 24], [14, 34], [14],
           [11, 12, 13, 21, 22, 23, 31, 32, 33], [11, 12, 13, 21, 22, 23],
           [11, 12, 13, 31, 32, 33], [11, 12, 13], [11, 12, 21, 22, 31, 32],
           [11, 12, 21, 22], [11, 12, 31, 32], [11, 12],
           [11, 13, 21, 23, 31, 33], [11, 13, 21, 23], [11, 13, 31, 33],
           [11, 13], [11, 21, 31], [11, 21], [11, 31], [11]]
    i = 0
    for msrc in [False, True]:
        for mrec in [False, True]:
            for src in ang:
                for rec in ang:
                    out = utils.get_abs(msrc, mrec, src[0], src[1], rec[0],
                                        rec[1], 0)
                    assert_allclose(out, res[i])
                    i += 1
    # Check some more
    # y/z-plane, z-dir
    ang = [[np.pi/2, 0], [0, np.pi/2]]
    # Results for EE, ME, EM, MM
    res = [[22], [32], [23], [33], [25], [26], [35], [36], [25], [35], [26],
           [36], [22], [32], [23], [33]]
    i = 0
    for msrc in [False, True]:
        for mrec in [False, True]:
            for src in ang:
                for rec in ang:
                    out = utils.get_abs(msrc, mrec, src[0], src[1], rec[0],
                                        rec[1], 0)
                    assert_allclose(out, res[i])
                    i += 1
    # Check print statement
    _ = utils.get_abs(True, True, 90, 0, 0, 90, 3)
    out, _ = capsys.readouterr()
    assert out == " Required ab's : 11 12 31 32\n"
    # Assure that for different, but aligned angles, they are not deleted.
    ab_calc = utils.get_abs(
            False, False, np.array([0., np.pi/2]), np.array([0., 0.]),
            np.array([0.]), np.array([0.]), 0)
    assert_allclose(ab_calc, [11, 12])
    ab_calc = utils.get_abs(
            False, False, np.array([0., 0.]), np.array([3*np.pi/2, np.pi]),
            np.array([0.]), np.array([0.]), 0)
    assert_allclose(ab_calc, [11, 13])
    ab_calc = utils.get_abs(
            False, False, np.array([0.]), np.array([0.]),
            np.array([0., np.pi/2]), np.array([0., 0.]), 0)
    assert_allclose(ab_calc, [11, 21])
    ab_calc = utils.get_abs(
            False, False, np.array([0.]), np.array([0.]),
            np.array([0., 0.]), np.array([3*np.pi/2, np.pi]), 0)
    assert_allclose(ab_calc, [11, 31])
def test_get_geo_fact():
    """Check utils.get_geo_fact for all nine 'ab' configurations.

    ``res`` holds the reference factors for the electric-electric (and
    magnetic-magnetic) configurations; ``res2`` is the same 3x3 matrix
    flipped and rotated (i.e. transposed), used for the mixed
    electric/magnetic case with msrc=False, mrec=True.

    Fix: removed the dead ``i = 0`` initialisations and the no-op
    ``i += 1`` at the end of each ``for i in range(9)`` loop (the loop
    variable is reassigned on every iteration anyway).
    """
    res = np.array([0.017051023225738, 0.020779123804907, -0.11077204227395,
                    -0.081155809427821, -0.098900024313067, 0.527229048585517,
                    -0.124497144079623, -0.151717673241039, 0.808796206796408])
    res2 = np.rot90(np.fliplr(res.reshape(3, -1))).ravel()
    # EE, MM
    ab = [11, 12, 13, 21, 22, 23, 31, 32, 33]
    for i in range(9):
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, False, False)
        assert_allclose(out[0], res[i])
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, True, True)
        assert_allclose(out[0], res[i])
    # ME, EM
    ab = [14, 15, 16, 24, 25, 26, 34, 35, 36]
    for i in range(9):
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, False, True)
        assert_allclose(out[0], res2[i])
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, True, False)
        assert_allclose(out[0], res[i])
def test_get_layer_nr():
    """Check utils.get_layer_nr: layer index and in-layer depth for
    single points and for arrays of points."""
    bip = np.array([0, 0, 300])
    lbip, zbip = utils.get_layer_nr(bip, np.array([-np.infty, 500]))
    assert lbip == 0
    assert zbip == 300
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 300, 500]))
    assert lbip == 1
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert lbip == 2
    # Array of points: one layer index per point
    bip = np.array([np.zeros(4), np.zeros(4), np.arange(4)*100])
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert_allclose(lbip, [0, 1, 1, 2])
def test_get_off_ang(capsys):
    """Check utils.get_off_ang: offsets and angles between source and
    receiver positions, including the minimum-offset warning and the
    NaN angle for a zero offset."""
    src = [np.array([0, 100]), np.array([0, 100]), np.array([0, 100])]
    rec = [np.array([0, 5000]), np.array([0, 100]), np.array([0, 200])]
    # First offset is clipped to the minimum (0.001); its angle is NaN
    resoff = np.array([0.001, 5001, 141.42135623730951, 4900])
    resang = np.array([np.nan, 0.019997333973150531, -2.3561944901923448, 0.])
    off, ang = utils.get_off_ang(src, rec, 2, 2, 3)
    out, _ = capsys.readouterr()
    assert out[:23] == "* WARNING :: Offsets < "
    assert_allclose(off, resoff)
    assert_allclose(ang, resang, equal_nan=True)
def test_get_azm_dip(capsys):
    """Check utils.get_azm_dip for dipoles and bipoles, sources and
    receivers, with one or two different-depth inputs (ninpz) and
    various integration-point counts (intpts)."""
    # Dipole, src, ninpz = 1
    inp = [np.array([0]), np.array([0]), np.array([0]), np.array([0]),
           np.array([np.pi/4])]
    out = utils.get_azm_dip(inp, 0, 1, 1, True, 300, 'src', 0)
    assert out[0][0] == inp[0]
    assert out[0][1] == inp[1]
    assert out[0][2] == inp[2]
    assert out[0][3] == inp[3]
    assert out[0][4] == inp[4]
    assert out[1] == 0
    assert_allclose(out[2], 0.013707783890402)
    assert out[3] == 1
    assert out[4] == 1
    assert out[5] == 300
    # Dipole, rec, ninpz = 2, verbose
    inp = [np.array([0, 0]), np.array([0, 0]), np.array([0, 100]),
           np.array([np.pi/2]), np.array([np.pi/3])]
    out = utils.get_azm_dip(inp, 0, 2, 52, True, 300, 'rec', 4)
    outstr, _ = capsys.readouterr()
    assert out[0][0] == inp[0][0]
    assert out[0][1] == inp[1][0]
    assert out[0][2] == inp[2][0]
    assert out[0][3] == inp[3]
    assert out[0][4] == inp[4]
    assert_allclose(out[1], 0.027415567780804)
    assert_allclose(out[2], 0.018277045187203)
    assert out[3] == 1
    assert out[4] == 1
    assert out[5] == 1
    assert outstr[:42] == " Receiver(s) : 1 dipole(s)\n > x"
    # Bipole, src, ninpz = 1, intpts = 5, verbose
    inp = [np.array([-50]), np.array([50]), np.array([50]), np.array([100]),
           np.array([0]), np.array([0])]
    out = utils.get_azm_dip(inp, 0, 1, 5, False, 300, 'src', 4)
    outstr, _ = capsys.readouterr()
    # Five Gauss-quadrature points along the bipole, with their weights
    assert_allclose(out[0][0],
                    np.array([-45.309, -26.923, 0., 26.923, 45.309]))
    assert_allclose(out[0][1], np.array([52.346, 61.538, 75., 88.462, 97.654]))
    assert_allclose(out[0][2], np.array([0., 0., 0., 0., 0.]))
    assert_allclose(out[1], 0.463647609000806)
    assert out[2] == 0
    assert_allclose(out[3], np.array([0.118463442528094, 0.239314335249683,
        0.284444444444445, 0.239314335249683, 0.118463442528094]))
    assert out[4] == 5
    assert_allclose(out[5], 33541.01966249684483)
    assert outstr[:47] == " Source(s) : 1 bipole(s)\n > intpts"
    # Bipole, rec, ninpz = 2, intpts = 1, verbose
    inp = [np.array([-50, 0]), np.array([50, 0]), np.array([0, -50]),
           np.array([0, 50]), np.array([0, 100]), np.array([0, 100])]
    out = utils.get_azm_dip(inp, 0, 2, 1, False, 300, 'rec', 4)
    outstr, _ = capsys.readouterr()
    assert out[0][0] == 0
    assert out[0][1] == 0
    assert out[0][2] == 0
    assert out[1] == 0
    assert out[2] == 0
    assert out[3] == 1
    assert out[4] == 1
    assert out[5] == 100
    assert outstr[:47] == " Receiver(s) : 1 bipole(s)\n > intpts"
def test_get_kwargs(capsys):
    """Check utils.get_kwargs: requested keys fall back to their defaults,
    leftover keys trigger a warning, and unknown keys raise TypeError."""
    kwargs1 = {'ft': 'sin', 'depth': []}
    ft, ht = utils.get_kwargs(['ft', 'ht'], ['dlf', 'dlf'], kwargs1)
    out, _ = capsys.readouterr()
    assert ft == 'sin'
    assert ht == 'dlf'  # not in kwargs1, so the default is returned
    assert "* WARNING :: Unused **kwargs: {'depth': []}" in out
    kwargs2 = {'depth': [], 'unknown': 1}
    with pytest.raises(TypeError, match='Unexpected '):
        utils.get_kwargs(['verb', ], [0, ], kwargs2)
def test_printstartfinish(capsys):
    """Check utils.printstartfinish: returns a start time, is silent for
    verb=0, and prints start/end banners (with optional kernel count)
    for verb=3."""
    t0 = utils.printstartfinish(0)
    assert isinstance(t0, float)
    out, _ = capsys.readouterr()
    assert out == ""
    t0 = utils.printstartfinish(3)
    assert isinstance(t0, float)
    out, _ = capsys.readouterr()
    assert ":: empymod START :: v" in out
    utils.printstartfinish(0, t0)
    out, _ = capsys.readouterr()
    assert out == ""
    utils.printstartfinish(3, t0)
    out, _ = capsys.readouterr()
    assert out[:27] == "\n:: empymod END; runtime = "
    # Optional third argument: number of kernel calls
    utils.printstartfinish(3, t0, 13)
    out, _ = capsys.readouterr()
    assert out[-19:] == "13 kernel call(s)\n\n"
def test_conv_warning(capsys):
    """Check utils.conv_warning: a warning is printed only when the
    computation did NOT converge AND verbosity is > 0."""
    # If converged, no output
    utils.conv_warning(True, ['', '', '', 51, ''], 'Hankel', 0)
    out, _ = capsys.readouterr()
    assert out == ""
    # If not converged, but verb=0, no output
    utils.conv_warning(False, ['', '', '', 51, ''], 'Hankel', 0)
    out, _ = capsys.readouterr()
    assert out == ""
    # If not converged, and verb>0, print
    utils.conv_warning(False, ['', '', '', 51, ''], 'Hankel', 1)
    out, _ = capsys.readouterr()
    assert '* WARNING :: Hankel-quadrature did not converge' in out
    # If converged, and verb>1, no output
    utils.conv_warning(True, ['', '', '', 51, ''], 'Hankel', 1)
    out, _ = capsys.readouterr()
    assert out == ""
def test_check_shape():
    """Check utils._check_shape: accepts an array matching either of up to
    two allowed shapes, raises ValueError otherwise."""
    # Ensure no Error is raised
    utils._check_shape(np.zeros((3, 4)), 'tvar', (3, 4))
    utils._check_shape(np.zeros((3, 4)), 'tvar', (3, 4), (2, ))
    utils._check_shape(np.zeros((3, 4)), 'tvar', (2,), (3, 4))
    # Ensure Error is raised
    with pytest.raises(ValueError, match='Parameter tvar has wrong shape'):
        utils._check_shape(np.zeros((3, 4)), 'tvar', (2,))
    with pytest.raises(ValueError, match='Parameter tvar has wrong shape'):
        utils._check_shape(np.zeros((3, 4)), 'tvar', (2,), (1, 4))
def test_check_var():
    """Check utils._check_var (np.array conversion plus optional
    _check_shape): no shape, one wrong shape, and two shapes with the
    second one matching."""
    # This is basically np.array(), with an optional call to _check_shape
    # above. Just three simple checks therefore; one without call, one with one
    # shape, and one with two shapes
    # Without shapes
    out = utils._check_var(np.pi, int, 3, 'tvar')
    assert out[0, 0, 0] == 3  # cast to int, broadcast to ndmin=3
    # One shape, but wrong
    with pytest.raises(ValueError, match='Parameter tvar has wrong shape'):
        out = utils._check_var(np.arange(3)*.5, float, 1, 'tvar', (1, 3))
    # Two shapes, second one is correct
    out = utils._check_var(np.arange(3)*.5, float, 1, 'tvar', (1, 3), (3, ))
def test_strvar():
    """Check utils._strvar: default formatting of an array, and custom
    format-string formatting of a scalar."""
    out = utils._strvar(np.arange(3)*np.pi)
    assert out == "0 3.14159 6.28319"
    out = utils._strvar(np.pi, '{:20.10e}')
    assert out == " 3.1415926536e+00"
def test_prnt_min_max_val(capsys):
    """Check utils._prnt_min_max_val: min/max/count summary for arrays,
    the full value list at high verbosity, and the plain-value output
    for 0-d arrays."""
    utils._prnt_min_max_val(np.arange(1, 5)*2, 'tvar', 0)
    out, _ = capsys.readouterr()
    assert out == "tvar 2 - 8 : 4 [min-max; #]\n"
    # verb=4 additionally prints all values
    utils._prnt_min_max_val(np.arange(1, 5)*2, 'tvar', 4)
    out, _ = capsys.readouterr()
    outstr = "tvar 2 - 8 : 4 [min-max; #]\n : 2 4 6 8\n"
    assert out == outstr
    # 0-d array: just the value, independent of verbosity
    utils._prnt_min_max_val(np.array(1), 'tvar', 0)
    out, _ = capsys.readouterr()
    assert out == "tvar 1\n"
    utils._prnt_min_max_val(np.array(1), 'tvar', 4)
    out, _ = capsys.readouterr()
    assert out == "tvar 1\n"
def test_check_min(capsys):
    """Check utils._check_min: values below the minimum are clipped to it,
    and a warning is printed only when verb > 0."""
    # Have to provide copies, as they are changed in place...good/bad?
    # inp > minval      verb = 0
    inp1 = np.array([1e-3])
    out1 = utils._check_min(inp1.copy(), 1e-20, 'name', 'unit', 0)
    out, _ = capsys.readouterr()
    assert out == ""
    assert_allclose(inp1, out1)
    # inp > minval      verb = 1
    inp2 = np.array([1e3, 10])
    out2 = utils._check_min(inp2.copy(), 1e-10, 'name', 'unit', 1)
    out, _ = capsys.readouterr()
    assert out == ""
    assert_allclose(inp2, out2)
    # inp < minval      verb = 0: clipped, but silent
    inp3 = np.array([1e-6])
    out3 = utils._check_min(inp3.copy(), 1e-3, 'name', 'unit', 0)
    out, _ = capsys.readouterr()
    assert out == ""
    assert_allclose(np.array([1e-3]), out3)
    # inp < minval      verb = 1: clipped with warning
    inp4 = np.array([1e-20, 1e-3])
    out4 = utils._check_min(inp4.copy(), 1e-15, 'name', 'unit', 1)
    out, _ = capsys.readouterr()
    assert out[:35] == "* WARNING :: name < 1e-15 unit are "
    assert_allclose(np.array([1e-15, 1e-3]), out4)
def test_minimum():
    """Check utils.get_minimum defaults and utils.set_minimum overrides.

    NOTE(review): set_minimum mutates global state and this test does not
    restore the defaults afterwards — subsequent tests in the session see
    the new minima; confirm this is intended.
    """
    # Check default values
    d = utils.get_minimum()
    assert d['min_freq'] == 1e-20
    assert d['min_time'] == 1e-20
    assert d['min_off'] == 1e-3
    assert d['min_res'] == 1e-20
    assert d['min_angle'] == 1e-10
    # Set all default values to new values
    utils.set_minimum(1e-2, 1e-3, 1, 1e-4, 1e-5)
    # Check new values
    d = utils.get_minimum()
    assert d['min_freq'] == 1e-2
    assert d['min_time'] == 1e-3
    assert d['min_off'] == 1
    assert d['min_res'] == 1e-4
    assert d['min_angle'] == 1e-5
def test_report(capsys):
    """Check utils.Report: matches a scooby.Report with the expected
    package lists when scooby is installed, otherwise prints a warning
    (soft dependency)."""
    out, _ = capsys.readouterr()  # Empty capsys
    # Reporting is now done by the external package scooby.
    # We just ensure the shown packages do not change (core and optional).
    if scooby:
        out1 = scooby.Report(
                core=['numpy', 'scipy', 'numba', 'empymod'],
                optional=['IPython', 'matplotlib'],
                ncol=3)
        out2 = utils.Report()
        # Ensure they're the same; exclude time to avoid errors.
        # (The first ~115 characters contain the timestamp header.)
        assert out1.__repr__()[115:] == out2.__repr__()[115:]
    else:  # soft dependency
        _ = utils.Report()
        out, _ = capsys.readouterr()  # Empty capsys
        assert 'WARNING :: `empymod.Report` requires `scooby`' in out
| apache-2.0 |
Sunhick/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chose to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevent, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
    """Represents a CDF based on calculated quantiles.

    Unlike a sample-based Cdf, the stored (xs, ps) pairs already trace a
    smooth curve, so rendering and lookups interpolate between them.
    """
    def Render(self):
        """Because this CDF was not computed from a sample, it
        should not be rendered as a step function.

        Returns the raw (xs, ps) sequences for plotting.
        """
        return self.xs, self.ps
    def Prob(self, x):
        """Compute CDF(x), interpolating linearly between known values.
        """
        return np.interp(x, self.xs, self.ps)
    def Value(self, p):
        """Compute inverse CDF(p), interpolating between probabilities.
        """
        return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
    """Plots the CDF of income in several forms.

    df: DataFrame with columns `income` and `ps` (cumulative probability);
        presumably the output of hinc.ReadData — confirm against caller.

    Writes three figure files: hinc_linear (CDF, linear scale),
    hinc_pareto (CCDF, log-log, with a Pareto model), and hinc_normal
    (CDF on log-x with a lognormal model).
    """
    xs, ps = df.income.values, df.ps.values
    cdf = SmoothCdf(xs, ps, label='data')
    cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
    # linear plot
    thinkplot.Cdf(cdf)
    thinkplot.Save(root='hinc_linear',
                   xlabel='household income',
                   ylabel='CDF')
    # pareto plot
    # for the model I chose parameters by hand to fit the tail
    xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
                                         low=0, high=250000)
    thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
    thinkplot.Cdf(cdf, complement=True)
    thinkplot.Save(root='hinc_pareto',
                   xlabel='log10 household income',
                   ylabel='CCDF',
                   xscale='log',
                   yscale='log')
    # lognormal plot
    # for the model I estimate mu and sigma using
    # percentile-based statistics
    median = cdf_log.Percentile(50)
    iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
    std = iqr / 1.349  # IQR-to-sigma conversion for a normal distribution
    # choose std to match the upper tail (overrides the IQR estimate)
    std = 0.35
    print(median, std)
    xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
    thinkplot.Plot(xs, ps, label='model', color='0.8')
    thinkplot.Cdf(cdf_log)
    thinkplot.Save(root='hinc_normal',
                   xlabel='log10 household income',
                   ylabel='CDF')
def main():
    """Read the CPS household-income data and generate all figures."""
    MakeFigures(hinc.ReadData())
if __name__ == "__main__":
    main()
| gpl-3.0 |
Windy-Ground/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Return the duration of *delta* in fractional seconds.

    Parameters
    ----------
    t_start : any
        Unused; kept for interface compatibility with existing callers.
    delta : datetime.timedelta
        The measured duration.

    Fix: uses ``timedelta.total_seconds()``, which — unlike the previous
    ``delta.seconds + delta.microseconds / 1e6`` — also accounts for the
    ``days`` component (``.seconds`` alone silently drops whole days).
    """
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Time a single call of ``sampling(n_population, n_samples)``.

    Garbage collection is forced beforehand so that leftover garbage
    from a previous run does not pollute the measurement.

    Returns the elapsed wall-clock time in seconds (float).
    """
    gc.collect()

    started_at = datetime.now()
    sampling(n_population, n_samples)
    elapsed = datetime.now() - started_at

    return compute_time(started_at, elapsed)
# Script entry point: parse CLI options, run each selected sampling
# algorithm over a grid of sample sizes, and plot the averaged timings.
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")

    op.add_option("--n-population",
                  dest="n_population", default=100000, type=int,
                  help="Size of the population to sample from.")

    op.add_option("--n-step",
                  dest="n_steps", default=5, type=int,
                  help="Number of step interval between 0 and n_population.")

    default_algorithms = "custom-tracking-selection,custom-auto," \
                         "custom-reservoir-sampling,custom-pool,"\
                         "python-core-sample,numpy-permutation"

    op.add_option("--algorithm",
                  dest="selected_algorithm",
                  default=default_algorithms,
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. \nAvailable: %default")

    # op.add_option("--random-seed",
    #               dest="random_seed", default=13, type=int,
    #               help="Seed used by the random number generators.")

    (opts, args) = op.parse_args()
    if len(args) > 0:
        # op.error() already prints and exits; sys.exit is a fallback
        # and is normally unreachable.
        op.error("this script takes no arguments.")
        sys.exit(1)

    selected_algorithm = opts.selected_algorithm.split(',')
    for key in selected_algorithm:
        if key not in default_algorithms.split(','):
            raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
                             % (key, default_algorithms))

    ###########################################################################
    # List sampling algorithm
    ###########################################################################
    # We assume that sampling algorithm has the following signature:
    # sample(n_population, n_sample)
    #
    sampling_algorithm = {}

    ###########################################################################
    # Set Python core input
    sampling_algorithm["python-core-sample"] = \
        lambda n_population, n_sample: \
        random.sample(xrange(n_population), n_sample)

    ###########################################################################
    # Set custom automatic method selection
    sampling_algorithm["custom-auto"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="auto",
                                   random_state=random_state)

    ###########################################################################
    # Set custom tracking based method
    sampling_algorithm["custom-tracking-selection"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="tracking_selection",
                                   random_state=random_state)

    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-reservoir-sampling"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="reservoir_sampling",
                                   random_state=random_state)

    ###########################################################################
    # Set custom pool based method
    sampling_algorithm["custom-pool"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="pool",
                                   random_state=random_state)

    ###########################################################################
    # Numpy permutation based
    sampling_algorithm["numpy-permutation"] = \
        lambda n_population, n_sample: \
        np.random.permutation(n_population)[:n_sample]

    ###########################################################################
    # Remove unspecified algorithm
    sampling_algorithm = dict((key, value)
                              for key, value in sampling_algorithm.items()
                              if key in selected_algorithm)

    ###########################################################################
    # Perform benchmark
    ###########################################################################
    # time maps algorithm name -> (n_steps, n_times) array of raw timings,
    # later reduced to per-step means.
    time = {}
    # NOTE(review): np.int is deprecated/removed in recent NumPy releases;
    # plain int would be the modern spelling here.
    n_samples = np.linspace(start=0, stop=opts.n_population,
                            num=opts.n_steps).astype(np.int)

    ratio = n_samples / opts.n_population

    print('Benchmarks')
    print("===========================")

    for name in sorted(sampling_algorithm):
        print("Perform benchmarks for %s..." % name, end="")
        time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))

        for step in xrange(opts.n_steps):
            for it in xrange(opts.n_times):
                time[name][step, it] = bench_sample(sampling_algorithm[name],
                                                    opts.n_population,
                                                    n_samples[step])

        print("done")

    print("Averaging results...", end="")
    for name in sampling_algorithm:
        # average over the repetitions axis
        time[name] = np.mean(time[name], axis=1)
    print("done\n")

    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)

    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))

    print("")

    print("Sampling algorithm performance:")
    print("===============================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")

    fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
    plt.title("n_population = %s, n_times = %s" %
              (opts.n_population, opts.n_times))

    ax = fig.add_subplot(111)
    for name in sampling_algorithm:
        ax.plot(ratio, time[name], label=name)

    ax.set_xlabel('ratio of n_sample / n_population')
    ax.set_ylabel('Time (s)')
    ax.legend()

    # Sort legend labels alphabetically so the legend order is stable
    handles, labels = ax.get_legend_handles_labels()
    hl = sorted(zip(handles, labels),
                key=operator.itemgetter(1))
    handles2, labels2 = zip(*hl)
    ax.legend(handles2, labels2, loc=0)

    plt.show()
| bsd-3-clause |
murali-munna/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the
data distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)

# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause

import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston

# Get data: two 2D projections of the Boston housing data set
X1 = load_boston()['data'][:, [8, 10]]  # two clusters
X2 = load_boston()['data'][:, [5, 12]]  # "banana"-shaped

# Define "classifiers" to be used. The contamination / nu value of 0.261
# is the assumed fraction of outliers in the data.
classifiers = {
    "Empirical Covariance": EllipticEnvelope(support_fraction=1.,
                                             contamination=0.261),
    "Robust Covariance (Minimum Covariance Determinant)":
    EllipticEnvelope(contamination=0.261),
    "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}

# Learn a frontier for outlier detection with several classifiers.
# The meshgrids cover the data range of each projection; the zero level
# set of the decision function is the learned frontier.
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
    plt.figure(1)
    clf.fit(X1)
    Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
    Z1 = Z1.reshape(xx1.shape)
    legend1[clf_name] = plt.contour(
        xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
    plt.figure(2)
    clf.fit(X2)
    Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
    Z2 = Z2.reshape(xx2.shape)
    legend2[clf_name] = plt.contour(
        xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])

legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())

# Plot the results (= shape of the data points cloud)
plt.figure(1)  # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
             xycoords="data", textcoords="data",
             xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")

legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())

plt.figure(2)  # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
# BUG FIX: label the legend entries with the classifier names (the dict
# keys), not the contour objects (the dict values) as before — mirrors
# the first figure's legend above.
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
OGGM/oggm | oggm/core/massbalance.py | 1 | 50774 | """Mass-balance models"""
# Built ins
import logging
# External libs
import cftime
import numpy as np
import pandas as pd
import netCDF4
from scipy.interpolate import interp1d
from scipy import optimize as optimization
# Locals
import oggm.cfg as cfg
from oggm.cfg import SEC_IN_YEAR, SEC_IN_MONTH
from oggm.utils import (SuperclassMeta, lazy_property, floatyear_to_date,
date_to_floatyear, monthly_timeseries, ncDataset,
tolist, clip_min, clip_max, clip_array)
from oggm.exceptions import InvalidWorkflowError, InvalidParamsError
from oggm import entity_task
# Module logger
log = logging.getLogger(__name__)
class MassBalanceModel(object, metaclass=SuperclassMeta):
    """Interface and common logic for all mass balance models used in OGGM.

    All mass-balance models should implement this interface.

    Attributes
    ----------
    valid_bounds : [float, float]
        The altitudinal bounds where the MassBalanceModel is valid. This is
        necessary for automated ELA search.
    """

    def __init__(self):
        """Initialize the common attributes."""
        # subclasses are expected to set these two
        self.valid_bounds = None
        self.hemisphere = None
        # ice density [kg m-3], used to convert mass to ice thickness rates
        self.rho = cfg.PARAMS['ice_density']

    def __repr__(self):
        """String representation of the mass-balance model."""
        summary = ['<oggm.MassBalanceModel>']
        summary += [' Class: ' + self.__class__.__name__]
        summary += [' Attributes:']
        # Add all scalar attributes (private ones are skipped)
        for k, v in self.__dict__.items():
            if np.isscalar(v) and not k.startswith('_'):
                summary += [' - {}: {}'.format(k, v)]
        return '\n'.join(summary) + '\n'

    # TODO: remove this in OGGM v1.5
    # Deprecation shim: any access to the old `prcp_bias` name fails loudly
    # with a pointer to the new `prcp_fac` attribute.
    @property
    def prcp_bias(self):
        raise AttributeError('prcp_bias has been renamed to prcp_fac as it is '
                             'a multiplicative factor, please use prcp_fac '
                             'instead.')

    @prcp_bias.setter
    def prcp_bias(self, new_prcp_fac):
        raise AttributeError('prcp_bias has been renamed to prcp_fac as it is '
                             'a multiplicative factor. If you want to '
                             'change the precipitation scaling factor use '
                             'prcp_fac instead.')

    def get_monthly_mb(self, heights, year=None, fl_id=None, fls=None):
        """Monthly mass-balance at given altitude(s) for a moment in time.

        Units: [m s-1], or meters of ice per second

        Note: `year` is optional because some simpler models have no time
        component.

        Parameters
        ----------
        heights: ndarray
            the altitudes at which the mass-balance will be computed
        year: float, optional
            the time (in the "hydrological floating year" convention)
        fl_id: float, optional
            the index of the flowline in the fls array (might be ignored
            by some MB models)
        fls: list of flowline instances, optional
            the flowlines array, in case the MB model implementation needs
            to know details about the glacier geometry at the moment the
            MB model is called

        Returns
        -------
        the mass-balance (same dim as `heights`) (units: [m s-1])
        """
        raise NotImplementedError()

    def get_annual_mb(self, heights, year=None, fl_id=None, fls=None):
        """Like `self.get_monthly_mb()`, but for annual MB.

        For some simpler mass-balance models ``get_monthly_mb()`` and
        ``get_annual_mb()`` can be equivalent.

        Units: [m s-1], or meters of ice per second

        Note: `year` is optional because some simpler models have no time
        component.

        Parameters
        ----------
        heights: ndarray
            the altitudes at which the mass-balance will be computed
        year: float, optional
            the time (in the "floating year" convention)
        fl_id: float, optional
            the index of the flowline in the fls array (might be ignored
            by some MB models)
        fls: list of flowline instances, optional
            the flowlines array, in case the MB model implementation needs
            to know details about the glacier geometry at the moment the
            MB model is called

        Returns
        -------
        the mass-balance (same dim as `heights`) (units: [m s-1])
        """
        raise NotImplementedError()

    def get_specific_mb(self, heights=None, widths=None, fls=None,
                        year=None):
        """Specific mb for this year and a specific glacier geometry.

        Units: [mm w.e. yr-1], or millimeter water equivalent per year

        Parameters
        ----------
        heights: ndarray
            the altitudes at which the mass-balance will be computed.
            Overridden by ``fls`` if provided
        widths: ndarray
            the widths of the flowline (necessary for the weighted average).
            Overridden by ``fls`` if provided
        fls: list of flowline instances, optional
            Another way to get heights and widths - overrides them if
            provided.
        year: float, optional
            the time (in the "hydrological floating year" convention)

        Returns
        -------
        the specific mass-balance (units: mm w.e. yr-1)
        """
        # Vectorize over years by recursing on each one
        if len(np.atleast_1d(year)) > 1:
            out = [self.get_specific_mb(heights=heights, widths=widths,
                                        fls=fls, year=yr)
                   for yr in year]
            return np.asarray(out)

        if fls is not None:
            # Concatenate MB and widths over all flowlines
            mbs = []
            widths = []
            for i, fl in enumerate(fls):
                _widths = fl.widths
                try:
                    # For rect and parabola don't compute spec mb
                    # (ice-free sections get zero weight)
                    _widths = np.where(fl.thick > 0, _widths, 0)
                except AttributeError:
                    # flowline type without a `thick` attribute
                    pass
                widths = np.append(widths, _widths)
                mbs = np.append(mbs, self.get_annual_mb(fl.surface_h,
                                                        fls=fls, fl_id=i,
                                                        year=year))
        else:
            mbs = self.get_annual_mb(heights, year=year)

        # Width-weighted average, converted from m ice s-1 to mm w.e. yr-1
        return np.average(mbs, weights=widths) * SEC_IN_YEAR * self.rho

    def get_ela(self, year=None, **kwargs):
        """Compute the equilibrium line altitude for this year

        Parameters
        ----------
        year: float, optional
            the time (in the "hydrological floating year" convention)
        **kwargs: any other keyword argument accepted by self.get_annual_mb

        Returns
        -------
        the equilibrium line altitude (ELA, units: m)
        """
        # Vectorize over years by recursing on each one
        if len(np.atleast_1d(year)) > 1:
            return np.asarray([self.get_ela(year=yr, **kwargs) for yr in year])

        if self.valid_bounds is None:
            raise ValueError('attribute `valid_bounds` needs to be '
                             'set for the ELA computation.')

        # Check for invalid ELAs: non-finite MB at the bounds, or an MB
        # profile that never crosses zero inside the bounds
        b0, b1 = self.valid_bounds
        if (np.any(~np.isfinite(
                self.get_annual_mb([b0, b1], year=year, **kwargs))) or
                (self.get_annual_mb([b0], year=year, **kwargs)[0] > 0) or
                (self.get_annual_mb([b1], year=year, **kwargs)[0] < 0)):
            return np.NaN

        def to_minimize(x):
            # annual MB in mm w.e. yr-1 at altitude x; root is the ELA
            return (self.get_annual_mb([x], year=year, **kwargs)[0] *
                    SEC_IN_YEAR * self.rho)
        return optimization.brentq(to_minimize, *self.valid_bounds, xtol=0.1)
class ScalarMassBalance(MassBalanceModel):
    """Mass balance that is one fixed value, everywhere and at all times."""

    def __init__(self, mb=0.):
        """ Initialize.

        Parameters
        ----------
        mb: float
            Fix the mass balance to a certain value (unit: [mm w.e. yr-1])
        """
        super(ScalarMassBalance, self).__init__()
        self._mb = mb
        self.valid_bounds = [-2e4, 2e4]  # in m
        self.hemisphere = 'nh'

    def _uniform_rate(self, heights):
        # Broadcast the scalar onto the heights array and convert the
        # fixed value from mm w.e. yr-1 to m ice s-1.
        flat = np.asarray(heights) * 0 + self._mb
        return flat / SEC_IN_YEAR / self.rho

    def get_monthly_mb(self, heights, **kwargs):
        """Constant MB rate, independent of altitude and time."""
        return self._uniform_rate(heights)

    def get_annual_mb(self, heights, **kwargs):
        """Identical to the monthly rate, since the MB is constant."""
        return self._uniform_rate(heights)
class LinearMassBalance(MassBalanceModel):
    """Constant-in-time mass balance that varies linearly with altitude.

    The balance at altitude z is ``(z - ela_h) * grad``, optionally
    capped at ``max_mb``.
    """

    def __init__(self, ela_h, grad=3., max_mb=None):
        """ Initialize.

        Parameters
        ----------
        ela_h: float
            Equilibrium line altitude (units: [m])
        grad: float
            Mass-balance gradient (unit: [mm w.e. yr-1 m-1])
        max_mb: float
            Cap the mass balance to a certain value (unit: [mm w.e. yr-1])

        Attributes
        ----------
        temp_bias : float, default 0
            A "temperature bias" doesn't makes much sense in the linear MB
            context, but we implemented a simple empirical rule:
            + 1K -> ELA + 150 m
        """
        super(LinearMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # in m
        self.hemisphere = 'nh'
        self.grad = grad
        self.max_mb = max_mb
        # keep the unbiased ELA around so the temp_bias rule is not cumulative
        self.orig_ela_h = ela_h
        self.ela_h = ela_h
        self._temp_bias = 0

    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self._temp_bias

    @temp_bias.setter
    def temp_bias(self, value):
        """Apply the empirical rule: +1K shifts the ELA up by 150 m."""
        self.ela_h = self.orig_ela_h + value * 150
        self._temp_bias = value

    def get_monthly_mb(self, heights, **kwargs):
        """Linear MB profile, converted to [m ice s-1]."""
        distance_to_ela = np.asarray(heights) - self.ela_h
        mb = distance_to_ela * self.grad
        if self.max_mb is not None:
            # cap accumulation in place at the prescribed maximum
            clip_max(mb, self.max_mb, out=mb)
        return mb / SEC_IN_YEAR / self.rho

    def get_annual_mb(self, heights, **kwargs):
        """Constant in time, so the annual rate equals the monthly rate."""
        return self.get_monthly_mb(heights, **kwargs)
class PastMassBalance(MassBalanceModel):
    """Mass balance during the climate data period.

    Temperature-index model: melt is ``mu_star`` times the temperature
    above the melt threshold; accumulation is the solid fraction of the
    (factor-corrected) precipitation.
    """

    def __init__(self, gdir, mu_star=None, bias=None,
                 filename='climate_historical', input_filesuffix='',
                 repeat=False, ys=None, ye=None, check_calib_params=True):
        """Initialize.

        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mu* you want to use
            (the default is to use the calibrated value).
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *substracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        repeat : bool
            Whether the climate period given by [ys, ye] should be repeated
            indefinitely in a circular way
        ys : int
            The start of the climate period where the MB model is valid
            (default: the period with available data)
        ye : int
            The end of the climate period where the MB model is valid
            (default: the period with available data)
        check_calib_params : bool
            OGGM will try hard not to use wrongly calibrated mu* by checking
            the parameters used during calibration and the ones you are
            using at run time. If they don't match, it will raise an error.
            Set to False to suppress this check.

        Attributes
        ----------
        temp_bias : float, default 0
            Add a temperature bias to the time series
        prcp_fac : float, default cfg.PARAMS['prcp_scaling_factor']
            Precipitation factor to the time series (called factor to make clear
            that it is a multiplicative factor in contrast to the additive
            `temp_bias`)
        """
        super(PastMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # in m
        if mu_star is None:
            df = gdir.read_json('local_mustar')
            mu_star = df['mu_star_glacierwide']
            if check_calib_params:
                # a glacier-wide mu* is only meaningful if all flowlines
                # share the same value
                if not df['mu_star_allsame']:
                    msg = ('You seem to use the glacier-wide mu* to compute '
                           'the mass-balance although this glacier has '
                           'different mu* for its flowlines. Set '
                           '`check_calib_params=False` to prevent this '
                           'error.')
                    raise InvalidWorkflowError(msg)

        if bias is None:
            if cfg.PARAMS['use_bias_for_run']:
                df = gdir.read_json('local_mustar')
                bias = df['bias']
            else:
                bias = 0.

        self.mu_star = mu_star
        self.bias = bias

        # Parameters (temperature thresholds for solid/liquid precip and melt)
        self.t_solid = cfg.PARAMS['temp_all_solid']
        self.t_liq = cfg.PARAMS['temp_all_liq']
        self.t_melt = cfg.PARAMS['temp_melt']
        prcp_fac = cfg.PARAMS['prcp_scaling_factor']
        # check if valid prcp_fac is used
        if prcp_fac <= 0:
            raise InvalidParamsError('prcp_fac has to be above zero!')
        default_grad = cfg.PARAMS['temp_default_gradient']

        # Check the climate related params to the GlacierDir to make sure
        # the runtime parameters match those used for calibration
        if check_calib_params:
            mb_calib = gdir.get_climate_info()['mb_calib_params']
            for k, v in mb_calib.items():
                if v != cfg.PARAMS[k]:
                    msg = ('You seem to use different mass-balance parameters '
                           'than used for the calibration. Set '
                           '`check_calib_params=False` to ignore this '
                           'warning.')
                    raise InvalidWorkflowError(msg)

        # Public attrs
        self.hemisphere = gdir.hemisphere
        self.repeat = repeat

        # Private attrs
        # to allow prcp_fac to be changed after instantiation
        # prescribe the prcp_fac as it is instantiated
        self._prcp_fac = prcp_fac
        # same for temp bias
        self._temp_bias = 0.

        # Read file (the climate time series for this glacier)
        fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)
        with ncDataset(fpath, mode='r') as nc:
            # time
            time = nc.variables['time']
            try:
                time = netCDF4.num2date(time[:], time.units)
            except ValueError:
                # This is for longer time series
                time = cftime.num2date(time[:], time.units, calendar='noleap')
            ny, r = divmod(len(time), 12)
            if r != 0:
                raise ValueError('Climate data should be N full years')
            # This is where we switch to hydro float year format
            # Last year gives the tone of the hydro year
            self.years = np.repeat(np.arange(time[-1].year-ny+1,
                                             time[-1].year+1), 12)
            self.months = np.tile(np.arange(1, 13), ny)
            # Read timeseries and correct it
            # (at this point _temp_bias is 0 and _prcp_fac the default)
            self.temp = nc.variables['temp'][:].astype(np.float64) + self._temp_bias
            self.prcp = nc.variables['prcp'][:].astype(np.float64) * self._prcp_fac
            if 'gradient' in nc.variables:
                grad = nc.variables['gradient'][:].astype(np.float64)
                # Security for stuff that can happen with local gradients
                g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
                grad = np.where(~np.isfinite(grad), default_grad, grad)
                grad = clip_array(grad, g_minmax[0], g_minmax[1])
            else:
                # no local gradients available: use the default everywhere
                grad = self.prcp * 0 + default_grad
            self.grad = grad
            self.ref_hgt = nc.ref_hgt
        self.ys = self.years[0] if ys is None else ys
        self.ye = self.years[-1] if ye is None else ye

    # adds the possibility of changing prcp_fac
    # after instantiation with properly changing the prcp time series
    @property
    def prcp_fac(self):
        """Precipitation factor applied to the raw time series."""
        return self._prcp_fac

    @prcp_fac.setter
    def prcp_fac(self, new_prcp_fac):
        # just to check that no invalid prcp_factors are used
        if np.any(np.asarray(new_prcp_fac) <= 0):
            raise InvalidParamsError('prcp_fac has to be above zero!')

        if len(np.atleast_1d(new_prcp_fac)) == 12:
            # OK so that's monthly stuff
            # We dirtily assume that user just used calendar month
            # -> roll to hydrological month order, then tile over all years
            sm = cfg.PARAMS['hydro_month_' + self.hemisphere]
            new_prcp_fac = np.roll(new_prcp_fac, 13 - sm)
            new_prcp_fac = np.tile(new_prcp_fac, len(self.prcp) // 12)

        # rescale in place: divide out the old factor, apply the new one
        self.prcp *= new_prcp_fac / self._prcp_fac

        # update old prcp_fac in order that it can be updated again ...
        self._prcp_fac = new_prcp_fac

    # same for temp_bias:
    @property
    def temp_bias(self):
        """Additive temperature bias applied to the raw time series."""
        return self._temp_bias

    @temp_bias.setter
    def temp_bias(self, new_temp_bias):
        if len(np.atleast_1d(new_temp_bias)) == 12:
            # OK so that's monthly stuff
            # We dirtily assume that user just used calendar month
            sm = cfg.PARAMS['hydro_month_' + self.hemisphere]
            new_temp_bias = np.roll(new_temp_bias, 13 - sm)
            new_temp_bias = np.tile(new_temp_bias, len(self.temp) // 12)

        # shift in place: remove the old bias, add the new one
        self.temp += new_temp_bias - self._temp_bias

        # update old temp_bias in order that it can be updated again ...
        self._temp_bias = new_temp_bias

    def get_monthly_climate(self, heights, year=None):
        """Monthly climate information at given heights.

        Note that prcp is corrected with the precipitation factor and that
        all other model biases (temp and prcp) are applied.

        Returns
        -------
        (temp, tempformelt, prcp, prcpsol)
        """
        y, m = floatyear_to_date(year)
        if self.repeat:
            # wrap the requested year into the valid period (circular)
            y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)
        if y < self.ys or y > self.ye:
            raise ValueError('year {} out of the valid time bounds: '
                             '[{}, {}]'.format(y, self.ys, self.ye))
        pok = np.where((self.years == y) & (self.months == m))[0][0]

        # Read already (temperature bias and precipitation factor corrected!)
        itemp = self.temp[pok]
        iprcp = self.prcp[pok]
        igrad = self.grad[pok]

        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        npix = len(heights)
        temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)
        tempformelt = temp - self.t_melt
        clip_min(tempformelt, 0, out=tempformelt)

        # Compute solid precipitation from total precipitation
        # (linear ramp between t_solid -> all solid and t_liq -> all liquid)
        prcp = np.ones(npix) * iprcp
        fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)
        prcpsol = prcp * clip_array(fac, 0, 1)

        return temp, tempformelt, prcp, prcpsol

    def _get_2d_annual_climate(self, heights, year):
        # Avoid code duplication with a getter routine
        # Returns 2D arrays of shape (n_heights, 12): one column per month.
        year = np.floor(year)
        if self.repeat:
            year = self.ys + (year - self.ys) % (self.ye - self.ys + 1)
        if year < self.ys or year > self.ye:
            raise ValueError('year {} out of the valid time bounds: '
                             '[{}, {}]'.format(year, self.ys, self.ye))
        pok = np.where(self.years == year)[0]
        if len(pok) < 1:
            raise ValueError('Year {} not in record'.format(int(year)))

        # Read already (temperature bias and precipitation factor corrected!)
        itemp = self.temp[pok]
        iprcp = self.prcp[pok]
        igrad = self.grad[pok]

        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        heights = np.asarray(heights)
        npix = len(heights)
        grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
        grad_temp *= (heights.repeat(12).reshape(grad_temp.shape) -
                      self.ref_hgt)
        temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
        temp2dformelt = temp2d - self.t_melt
        clip_min(temp2dformelt, 0, out=temp2dformelt)

        # Compute solid precipitation from total precipitation
        prcp = np.atleast_2d(iprcp).repeat(npix, 0)
        fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)
        prcpsol = prcp * clip_array(fac, 0, 1)

        return temp2d, temp2dformelt, prcp, prcpsol

    def get_annual_climate(self, heights, year=None):
        """Annual climate information at given heights.

        Note that prcp is corrected with the precipitation factor and that
        all other model biases (temp and prcp) are applied.

        Returns
        -------
        (temp, tempformelt, prcp, prcpsol)
        """
        t, tmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)
        # mean temperature over the year; melt/precip are annual sums
        return (t.mean(axis=1), tmelt.sum(axis=1),
                prcp.sum(axis=1), prcpsol.sum(axis=1))

    def get_monthly_mb(self, heights, year=None, add_climate=False, **kwargs):
        t, tmelt, prcp, prcpsol = self.get_monthly_climate(heights, year=year)
        # accumulation minus temperature-index melt [mm w.e. per month]
        mb_month = prcpsol - self.mu_star * tmelt
        # the calibration bias is an annual quantity: distribute it evenly
        mb_month -= self.bias * SEC_IN_MONTH / SEC_IN_YEAR
        if add_climate:
            return (mb_month / SEC_IN_MONTH / self.rho, t, tmelt,
                    prcp, prcpsol)
        # convert from mm w.e. per month to m ice s-1
        return mb_month / SEC_IN_MONTH / self.rho

    def get_annual_mb(self, heights, year=None, add_climate=False, **kwargs):
        t, tmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)
        # sum monthly balances, subtract calibration bias, convert units
        mb_annual = np.sum(prcpsol - self.mu_star * tmelt, axis=1)
        mb_annual = (mb_annual - self.bias) / SEC_IN_YEAR / self.rho
        if add_climate:
            return (mb_annual, t.mean(axis=1), tmelt.sum(axis=1),
                    prcp.sum(axis=1), prcpsol.sum(axis=1))
        return mb_annual
class ConstantMassBalance(MassBalanceModel):
"""Constant mass-balance during a chosen period.
This is useful for equilibrium experiments.
"""
def __init__(self, gdir, mu_star=None, bias=None,
y0=None, halfsize=15, filename='climate_historical',
input_filesuffix='', **kwargs):
"""Initialize
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value)
bias : float, optional
set to the alternative value of the annual bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
y0 : int, optional, default: tstar
the year at the center of the period of interest. The default
is to use tstar as center.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
"""
super(ConstantMassBalance, self).__init__()
self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
filename=filename,
input_filesuffix=input_filesuffix,
**kwargs)
if y0 is None:
df = gdir.read_json('local_mustar')
y0 = df['t_star']
# This is a quick'n dirty optimisation
try:
fls = gdir.read_pickle('model_flowlines')
h = []
for fl in fls:
# We use bed because of overdeepenings
h = np.append(h, fl.bed_h)
h = np.append(h, fl.surface_h)
zminmax = np.round([np.min(h)-50, np.max(h)+2000])
except FileNotFoundError:
# in case we don't have them
with ncDataset(gdir.get_filepath('gridded_data')) as nc:
if np.isfinite(nc.min_h_dem):
# a bug sometimes led to non-finite
zminmax = [nc.min_h_dem-250, nc.max_h_dem+1500]
else:
zminmax = [nc.min_h_glacier-1250, nc.max_h_glacier+1500]
self.hbins = np.arange(*zminmax, step=10)
self.valid_bounds = self.hbins[[0, -1]]
self.y0 = y0
self.halfsize = halfsize
self.years = np.arange(y0-halfsize, y0+halfsize+1)
self.hemisphere = gdir.hemisphere
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.mbmod.temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.temp_bias = value
@property
def prcp_fac(self):
"""Precipitation factor to apply to the original series."""
return self.mbmod.prcp_fac
@prcp_fac.setter
def prcp_fac(self, value):
"""Precipitation factor to apply to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.prcp_fac = value
@property
def bias(self):
"""Residual bias to apply to the original series."""
return self.mbmod.bias
@bias.setter
def bias(self, value):
"""Residual bias to apply to the original series."""
self.mbmod.bias = value
@lazy_property
def interp_yr(self):
# annual MB
mb_on_h = self.hbins*0.
for yr in self.years:
mb_on_h += self.mbmod.get_annual_mb(self.hbins, year=yr)
return interp1d(self.hbins, mb_on_h / len(self.years))
@lazy_property
def interp_m(self):
# monthly MB
months = np.arange(12)+1
interp_m = []
for m in months:
mb_on_h = self.hbins*0.
for yr in self.years:
yr = date_to_floatyear(yr, m)
mb_on_h += self.mbmod.get_monthly_mb(self.hbins, year=yr)
interp_m.append(interp1d(self.hbins, mb_on_h / len(self.years)))
return interp_m
def get_monthly_climate(self, heights, year=None):
    """Average climate information at given heights.

    Note that prcp is corrected with the precipitation factor and that
    all other biases (precipitation, temp) are applied.

    Returns
    -------
    (temp, tempformelt, prcp, prcpsol)
    """
    # Keep the calendar month, average over the years of the period
    _, month = floatyear_to_date(year)
    flyrs = [date_to_floatyear(y, month) for y in self.years]

    heights = np.atleast_1d(heights)
    # One tuple of (t, tm, p, ps) per year, each entry an array over heights
    per_year = [self.mbmod.get_monthly_climate(heights, year=yr)
                for yr in flyrs]
    temp, tempformelt, prcp, prcpsol = (np.asarray(v)
                                        for v in zip(*per_year))
    return (temp.mean(axis=0),
            tempformelt.mean(axis=0),
            prcp.mean(axis=0),
            prcpsol.mean(axis=0))
def get_annual_climate(self, heights, year=None):
    """Average climate information at given heights.

    Note that prcp is corrected with the precipitation factor and that
    all other biases (precipitation, temp) are applied.

    Returns
    -------
    (temp, tempformelt, prcp, prcpsol)
    """
    # All months of all years in the climate period
    flyrs = monthly_timeseries(self.years[0], self.years[-1],
                               include_last_year=True)
    heights = np.atleast_1d(heights)
    per_month = [self.mbmod.get_monthly_climate(heights, year=yr)
                 for yr in flyrs]
    temp, tempformelt, prcp, prcpsol = (np.asarray(v)
                                        for v in zip(*per_month))
    # Note that we do not weight for number of days per month:
    # this is consistent with OGGM's calendar
    return (temp.mean(axis=0),
            tempformelt.mean(axis=0) * 12,
            prcp.mean(axis=0) * 12,
            prcpsol.mean(axis=0) * 12)
def get_monthly_mb(self, heights, year=None, add_climate=False, **kwargs):
    """Monthly MB at given heights, from the cached monthly interpolants."""
    _, month = floatyear_to_date(year)
    mb = self.interp_m[month - 1](heights)
    if add_climate:
        t, tmelt, prcp, prcpsol = self.get_monthly_climate(heights,
                                                           year=year)
        return mb, t, tmelt, prcp, prcpsol
    return mb
def get_annual_mb(self, heights, year=None, add_climate=False, **kwargs):
    """Annual MB at given heights, from the cached annual interpolant."""
    mb = self.interp_yr(heights)
    if not add_climate:
        return mb
    t, tmelt, prcp, prcpsol = self.get_annual_climate(heights)
    return mb, t, tmelt, prcp, prcpsol
class RandomMassBalance(MassBalanceModel):
    """Random shuffle of all MB years within a given time period.

    This is useful for finding a possible past glacier state or for
    sensitivity experiments.

    Note that this is going to be sensitive to extreme years in certain
    periods, but it is by far more physically reasonable than other
    approaches based on gaussian assumptions.
    """

    def __init__(self, gdir, mu_star=None, bias=None,
                 y0=None, halfsize=15, seed=None,
                 filename='climate_historical', input_filesuffix='',
                 all_years=False, unique_samples=False, **kwargs):
        """Initialize.

        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mu* you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        y0 : int, optional, default: tstar
            the year at the center of the period of interest. The default
            is to use tstar as center.
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        seed : int, optional
            Random seed used to initialize the pseudo-random number generator.
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        all_years : bool
            if True, overwrites ``y0`` and ``halfsize`` to use all available
            years.
        unique_samples: bool
            if true, chosen random mass-balance years will only be available
            once per random climate period-length
            if false, every model year will be chosen from the random climate
            period with the same probability
        **kwargs:
            keyword arguments to pass to the PastMassBalance model
        """
        super(RandomMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # in m
        # All MB computations are delegated to a PastMassBalance model;
        # this class only shuffles the years it is asked for.
        self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
                                     filename=filename,
                                     input_filesuffix=input_filesuffix,
                                     **kwargs)

        # Climate period
        if all_years:
            self.years = self.mbmod.years
        else:
            if y0 is None:
                df = gdir.read_json('local_mustar')
                y0 = df['t_star']
            self.years = np.arange(y0-halfsize, y0+halfsize+1)
        self.yr_range = (self.years[0], self.years[-1]+1)
        self.ny = len(self.years)
        self.hemisphere = gdir.hemisphere

        # RandomState
        self.rng = np.random.RandomState(seed)
        # Cache: simulation year -> drawn climate year. Guarantees that
        # repeated queries for the same year return the same draw.
        self._state_yr = dict()

        # Sampling without replacement
        self.unique_samples = unique_samples
        if self.unique_samples:
            # Pool of not-yet-drawn years (refilled when exhausted)
            self.sampling_years = self.years

    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.mbmod.temp_bias

    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # NOTE(review): this class defines no lazy interpolants; the
        # delattr loop looks copied from the constant-MB model and is a
        # no-op here (guarded by hasattr) -- confirm before removing.
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.temp_bias = value

    @property
    def prcp_fac(self):
        """Precipitation factor to apply to the original series."""
        return self.mbmod.prcp_fac

    @prcp_fac.setter
    def prcp_fac(self, value):
        """Precipitation factor to apply to the original series."""
        # See NOTE in temp_bias setter: no-op cache invalidation here.
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.prcp_fac = value

    @property
    def bias(self):
        """Residual bias to apply to the original series."""
        return self.mbmod.bias

    @bias.setter
    def bias(self, value):
        """Residual bias to apply to the original series."""
        self.mbmod.bias = value

    def get_state_yr(self, year=None):
        """For a given year, get the random year associated to it."""
        year = int(year)
        if year not in self._state_yr:
            if self.unique_samples:
                # --- Sampling without replacement ---
                if self.sampling_years.size == 0:
                    # refill sample pool when all years were picked once
                    self.sampling_years = self.years
                # choose one year which was not used in the current period
                _sample = self.rng.choice(self.sampling_years)
                # write chosen year to dictionary
                self._state_yr[year] = _sample
                # update sample pool: remove the chosen year from it
                self.sampling_years = np.delete(
                    self.sampling_years,
                    np.where(self.sampling_years == _sample))
            else:
                # --- Sampling with replacement ---
                # yr_range upper bound is exclusive, hence the +1 at init
                self._state_yr[year] = self.rng.randint(*self.yr_range)
        return self._state_yr[year]

    def get_monthly_mb(self, heights, year=None, **kwargs):
        """Monthly MB: map the floating year to its drawn climate year,
        keeping the calendar month."""
        ryr, m = floatyear_to_date(year)
        ryr = date_to_floatyear(self.get_state_yr(ryr), m)
        return self.mbmod.get_monthly_mb(heights, year=ryr, **kwargs)

    def get_annual_mb(self, heights, year=None, **kwargs):
        """Annual MB of the randomly drawn climate year."""
        ryr = self.get_state_yr(int(year))
        return self.mbmod.get_annual_mb(heights, year=ryr, **kwargs)
class UncertainMassBalance(MassBalanceModel):
    """Adding uncertainty to a mass balance model.

    There are three variables for which you can add uncertainty:
    - temperature (additive bias)
    - precipitation (multiplicative factor)
    - residual (a bias in units of MB)
    """

    def __init__(self, basis_model,
                 rdn_temp_bias_seed=None, rdn_temp_bias_sigma=0.1,
                 rdn_prcp_bias_seed=None, rdn_prcp_bias_sigma=0.1,
                 rdn_bias_seed=None, rdn_bias_sigma=100):
        """Initialize.

        Parameters
        ----------
        basis_model : MassBalanceModel
            the model to which you want to add the uncertainty to
        rdn_temp_bias_seed : int
            the seed of the random number generator
        rdn_temp_bias_sigma : float
            the standard deviation of the random temperature error
        rdn_prcp_bias_seed : int
            the seed of the random number generator
            (to be consistent this should be renamed prcp_fac as well)
        rdn_prcp_bias_sigma : float
            the standard deviation of the random precipitation error
            (to be consistent this should be renamed prcp_fac as well)
        rdn_bias_seed : int
            the seed of the random number generator
        rdn_bias_sigma : float
            the standard deviation of the random MB error
        """
        super(UncertainMassBalance, self).__init__()
        # the aim here is to change temp_bias and prcp_fac so
        self.mbmod = basis_model
        self.hemisphere = basis_model.hemisphere
        self.valid_bounds = self.mbmod.valid_bounds
        # Three independent generators so each error stream is
        # reproducible on its own
        self.rng_temp = np.random.RandomState(rdn_temp_bias_seed)
        self.rng_prcp = np.random.RandomState(rdn_prcp_bias_seed)
        self.rng_bias = np.random.RandomState(rdn_bias_seed)
        self._temp_sigma = rdn_temp_bias_sigma
        self._prcp_sigma = rdn_prcp_bias_sigma
        self._bias_sigma = rdn_bias_sigma
        # Per-year caches: the same year always gets the same perturbation
        self._state_temp = dict()
        self._state_prcp = dict()
        self._state_bias = dict()

    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.mbmod.temp_bias

    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # NOTE(review): the delattr loop is a no-op here unless the
        # wrapped model stores these lazy caches on *this* object
        # (hasattr-guarded) -- looks copied from the constant-MB model.
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.temp_bias = value

    @property
    def prcp_fac(self):
        """Precipitation factor to apply to the original series."""
        return self.mbmod.prcp_fac

    @prcp_fac.setter
    def prcp_fac(self, value):
        """Precipitation factor to apply to the original series."""
        self.mbmod.prcp_fac = value

    def _get_state_temp(self, year):
        # Draw (once per year) a gaussian temperature perturbation
        year = int(year)
        if year not in self._state_temp:
            self._state_temp[year] = self.rng_temp.randn() * self._temp_sigma
        return self._state_temp[year]

    def _get_state_prcp(self, year):
        # Draw (once per year) a gaussian precipitation perturbation
        year = int(year)
        if year not in self._state_prcp:
            self._state_prcp[year] = self.rng_prcp.randn() * self._prcp_sigma
        return self._state_prcp[year]

    def _get_state_bias(self, year):
        # Draw (once per year) a gaussian residual-bias perturbation
        year = int(year)
        if year not in self._state_bias:
            self._state_bias[year] = self.rng_bias.randn() * self._bias_sigma
        return self._state_bias[year]

    def get_monthly_mb(self, heights, year=None, **kwargs):
        raise NotImplementedError()

    def get_annual_mb(self, heights, year=None, fl_id=None, **kwargs):
        """Annual MB with this year's random perturbations applied."""
        # Keep the original biases and add a random error
        _t = self.mbmod.temp_bias
        _p = self.mbmod.prcp_fac
        _b = self.mbmod.bias
        self.mbmod.temp_bias = self._get_state_temp(year) + _t
        # NOTE(review): the prcp perturbation is *added* to a
        # multiplicative factor; with a large sigma this could drive
        # prcp_fac to <= 0 -- confirm acceptable ranges.
        self.mbmod.prcp_fac = self._get_state_prcp(year) + _p
        self.mbmod.bias = self._get_state_bias(year) + _b
        try:
            out = self.mbmod.get_annual_mb(heights, year=year, fl_id=fl_id)
        except BaseException:
            # Restore the original state even on failure before
            # propagating the error
            self.mbmod.temp_bias = _t
            self.mbmod.prcp_fac = _p
            self.mbmod.bias = _b
            raise
        # Back to normal
        self.mbmod.temp_bias = _t
        self.mbmod.prcp_fac = _p
        self.mbmod.bias = _b
        return out
class MultipleFlowlineMassBalance(MassBalanceModel):
    """Handle mass-balance at the glacier level instead of flowline level.

    Convenience class doing not much more than wrapping a list of
    mass-balance models, one for each flowline.

    This is useful for real-case studies, where each flowline might have a
    different mu*.

    Attributes
    ----------
    fls : list
        list of flowline objects
    mb_models : list
        list of mass-balance objects
    """

    def __init__(self, gdir, fls=None, mu_star=None,
                 mb_model_class=PastMassBalance, use_inversion_flowlines=False,
                 input_filesuffix='', bias=None, **kwargs):
        """Initialize.

        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float or list of floats, optional
            set to the alternative value of mu* you want to use
            (the default is to use the calibrated value). Give a list of values
            for flowline-specific mu*
        fls : list, optional
            list of flowline objects to use (defaults to 'model_flowlines',
            and if not available, to 'inversion_flowlines')
        mb_model_class : class, optional
            the mass-balance model to use (e.g. PastMassBalance,
            ConstantMassBalance...)
        use_inversion_flowlines: bool, optional
            if True 'inversion_flowlines' instead of 'model_flowlines' will be
            used.
        input_filesuffix : str
            the file suffix of the input climate file
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        kwargs : kwargs to pass to mb_model_class
        """

        # Read in the flowlines
        if use_inversion_flowlines:
            fls = gdir.read_pickle('inversion_flowlines')

        if fls is None:
            try:
                fls = gdir.read_pickle('model_flowlines')
            except FileNotFoundError:
                raise InvalidWorkflowError('Need a valid `model_flowlines` '
                                           'file. If you explicitly want to '
                                           'use `inversion_flowlines`, set '
                                           'use_inversion_flowlines=True.')

        self.fls = fls
        _y0 = kwargs.get('y0', None)

        # User mu*?
        if mu_star is not None:
            # Broadcast a scalar mu* to all flowlines
            mu_star = tolist(mu_star, length=len(fls))
            for fl, mu in zip(self.fls, mu_star):
                fl.mu_star = mu

        # Initialise the mb models
        self.flowline_mb_models = []
        for fl in self.fls:
            # Merged glaciers will need different climate files, use filesuffix
            if (fl.rgi_id is not None) and (fl.rgi_id != gdir.rgi_id):
                rgi_filesuffix = '_' + fl.rgi_id + input_filesuffix
            else:
                rgi_filesuffix = input_filesuffix

            # merged glaciers also have a different MB bias from calibration
            if ((bias is None) and cfg.PARAMS['use_bias_for_run'] and
                    (fl.rgi_id != gdir.rgi_id)):
                df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)
                fl_bias = df['bias']
            else:
                fl_bias = bias

            # Constant and RandomMassBalance need y0 if not provided
            if (issubclass(mb_model_class, RandomMassBalance) or
                issubclass(mb_model_class, ConstantMassBalance)) and (
                    fl.rgi_id != gdir.rgi_id) and (_y0 is None):
                df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)
                # NOTE(review): this mutates the shared kwargs dict, so
                # once set, the y0 of a merged flowline persists for all
                # subsequent flowlines in this loop -- confirm intended.
                kwargs['y0'] = df['t_star']

            self.flowline_mb_models.append(
                mb_model_class(gdir, mu_star=fl.mu_star, bias=fl_bias,
                               input_filesuffix=rgi_filesuffix, **kwargs))

        self.valid_bounds = self.flowline_mb_models[-1].valid_bounds
        self.hemisphere = gdir.hemisphere

    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.flowline_mb_models[0].temp_bias

    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # Forward to every per-flowline model
        for mbmod in self.flowline_mb_models:
            mbmod.temp_bias = value

    @property
    def prcp_fac(self):
        """Precipitation factor to apply to the original series."""
        return self.flowline_mb_models[0].prcp_fac

    @prcp_fac.setter
    def prcp_fac(self, value):
        """Precipitation factor to apply to the original series."""
        for mbmod in self.flowline_mb_models:
            mbmod.prcp_fac = value

    @property
    def bias(self):
        """Residual bias to apply to the original series."""
        return self.flowline_mb_models[0].bias

    @bias.setter
    def bias(self, value):
        """Residual bias to apply to the original series."""
        for mbmod in self.flowline_mb_models:
            mbmod.bias = value

    def get_monthly_mb(self, heights, year=None, fl_id=None, **kwargs):
        """Monthly MB of one flowline (`fl_id` selects which model)."""
        if fl_id is None:
            raise ValueError('`fl_id` is required for '
                             'MultipleFlowlineMassBalance!')
        return self.flowline_mb_models[fl_id].get_monthly_mb(heights,
                                                             year=year,
                                                             **kwargs)

    def get_annual_mb(self, heights, year=None, fl_id=None, **kwargs):
        """Annual MB of one flowline (`fl_id` selects which model)."""
        if fl_id is None:
            raise ValueError('`fl_id` is required for '
                             'MultipleFlowlineMassBalance!')
        return self.flowline_mb_models[fl_id].get_annual_mb(heights,
                                                            year=year,
                                                            **kwargs)

    def get_annual_mb_on_flowlines(self, fls=None, year=None):
        """Get the MB on all points of the glacier at once.

        Parameters
        ----------
        fls: list, optional
            the list of flowlines to get the mass-balance from. Defaults
            to self.fls
        year: float, optional
            the time (in the "floating year" convention)

        Returns
        -------
        Tuple of (heights, widths, mass_balance) 1D arrays
        """
        if fls is None:
            fls = self.fls

        heights = []
        widths = []
        mbs = []
        # Concatenate the per-flowline arrays into flat 1D arrays
        for i, fl in enumerate(fls):
            h = fl.surface_h
            heights = np.append(heights, h)
            widths = np.append(widths, fl.widths)
            mbs = np.append(mbs, self.get_annual_mb(h, year=year, fl_id=i))

        return heights, widths, mbs

    def get_specific_mb(self, heights=None, widths=None, fls=None,
                        year=None):
        """Glacier-wide specific MB [mm we yr-1], area-weighted over
        all flowlines. `heights`/`widths` exist only for signature
        compatibility and must not be passed here."""
        if heights is not None or widths is not None:
            raise ValueError('`heights` and `widths` kwargs do not work with '
                             'MultipleFlowlineMassBalance!')

        if fls is None:
            fls = self.fls

        # Vectorize over a sequence of years by recursion
        if len(np.atleast_1d(year)) > 1:
            out = [self.get_specific_mb(fls=fls, year=yr) for yr in year]
            return np.asarray(out)

        mbs = []
        widths = []
        for i, (fl, mb_mod) in enumerate(zip(self.fls, self.flowline_mb_models)):
            _widths = fl.widths
            try:
                # For rect and parabola don't compute spec mb
                _widths = np.where(fl.thick > 0, _widths, 0)
            except AttributeError:
                # Flowline type without a `thick` attribute: keep widths
                pass
            widths = np.append(widths, _widths)
            mb = mb_mod.get_annual_mb(fl.surface_h, year=year, fls=fls, fl_id=i)
            # Convert [m s-1] ice to [mm we yr-1]
            mbs = np.append(mbs, mb * SEC_IN_YEAR * mb_mod.rho)

        return np.average(mbs, weights=widths)

    def get_ela(self, year=None, **kwargs):
        """Glacier-wide ELA as an area-weighted mean over flowlines."""
        # ELA here is not without ambiguity.
        # We compute a mean weighted by area.
        if len(np.atleast_1d(year)) > 1:
            return np.asarray([self.get_ela(year=yr) for yr in year])

        elas = []
        areas = []
        for fl_id, (fl, mb_mod) in enumerate(zip(self.fls,
                                                 self.flowline_mb_models)):
            elas = np.append(elas, mb_mod.get_ela(year=year, fl_id=fl_id,
                                                  fls=self.fls))
            # Widths sum is used as an area proxy
            areas = np.append(areas, np.sum(fl.widths))

        return np.average(elas, weights=areas)
@entity_task(log)
def fixed_geometry_mass_balance(gdir, ys=None, ye=None, years=None,
                                monthly_step=False,
                                use_inversion_flowlines=True,
                                climate_filename='climate_historical',
                                climate_input_filesuffix=''):
    """Computes the mass-balance with climate input from e.g. CRU or a GCM.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    ys : int
        start year of the model run (default: from the climate file)
        date)
    ye : int
        end year of the model run (default: from the climate file)
    years : array of ints
        override ys and ye with the years of your choice
    monthly_step : bool
        whether to store the diagnostic data at a monthly time step or not
        (default is yearly)
    use_inversion_flowlines : bool
        whether to use the inversion flowlines or the model flowlines
    climate_filename : str
        name of the climate file, e.g. 'climate_historical' (default) or
        'gcm_data'
    climate_input_filesuffix: str
        filesuffix for the input climate file
    """
    if monthly_step:
        raise NotImplementedError('monthly_step not implemented yet')

    mb = MultipleFlowlineMassBalance(
        gdir, mb_model_class=PastMassBalance,
        filename=climate_filename,
        use_inversion_flowlines=use_inversion_flowlines,
        input_filesuffix=climate_input_filesuffix)

    if years is None:
        # Default period: the full range covered by the climate file
        ref = mb.flowline_mb_models[0]
        start = ref.ys if ys is None else ys
        end = ref.ye if ye is None else ye
        years = np.arange(start, end + 1)

    return pd.Series(data=mb.get_specific_mb(year=years), index=years)
| bsd-3-clause |
jklenzing/pysat | pysat/tests/test_omni_hro.py | 2 | 6792 | import datetime as dt
import numpy as np
import pandas as pds
import pysat
from pysat.instruments import omni_hro
class TestOMNICustom():
    """Tests for the OMNI HRO custom analysis functions (clock angle,
    IMF steadiness, dayside reconnection)."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        # Load a test instrument
        self.testInst = pysat.Instrument('pysat', 'testing', sat_id='12',
                                         tag='1min', clean_level='clean')
        self.testInst.load(2009, 1)

        # Recast time in minutes rather than seconds
        self.testInst.data.index = \
            pds.Series([t + dt.timedelta(seconds=60-i) +
                        dt.timedelta(minutes=i)
                        for i, t in enumerate(self.testInst.data.index)])

        # Add IMF data (fixed values so the expected results below are exact)
        self.testInst['BX_GSM'] = pds.Series([3.17384966, 5.98685138,
                                              1.78749668, 0.38628409,
                                              2.73080263, 1.58814078,
                                              5.24880448, 3.92347300,
                                              5.59494670, 0.93246592,
                                              5.23676319, 1.14214992],
                                             index=self.testInst.data.index)
        self.testInst['BY_GSM'] = pds.Series([3.93531272, 2.50331246,
                                              0.99765539, 1.07203600,
                                              5.43752734, 5.10629137,
                                              0.59588891, 2.19412638,
                                              0.15550858, 3.75433603,
                                              4.82323932, 3.61784563],
                                             index=self.testInst.data.index)
        self.testInst['BZ_GSM'] = pds.Series([3.94396168, 5.61163579,
                                              4.02930788, 5.47347958,
                                              5.69823962, 0.47219819,
                                              1.47760461, 3.47187188,
                                              4.12581021, 4.40641671,
                                              2.87780562, 0.58539121],
                                             index=self.testInst.data.index)
        self.testInst['flow_speed'] = \
            pds.Series([394.396168, 561.163579,
                        402.930788, 547.347958,
                        569.823962, 47.219819,
                        147.760461, 347.187188,
                        412.581021, 440.641671,
                        287.780562, 58.539121],
                       index=self.testInst.data.index)

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst

    # def test_time_shift_to_magnetic_poles(self):
    #     """Test time shift of OMNI data"""
    #     pysat.instruments.omni_hro.time_shift_to_magnetic_poles(self.testInst)
    #
    #     assert True

    def test_clock_angle(self):
        """ Test clock angle."""
        # Run the clock angle routine
        omni_hro.calculate_clock_angle(self.testInst)

        # Set test clock angle (expected values for the fixture above)
        test_angle = np.array([44.93710732, 24.04132437, 13.90673288,
                               11.08167359, 43.65882745, 84.71666707,
                               21.96325222, 32.29174675, 2.15855047,
                               40.43151704, 59.17741091, 80.80882619])

        # Test the difference. There may be a 2 pi integer ambiguity
        test_diff = abs(test_angle - self.testInst['clock_angle'])
        assert np.all(test_diff < 1.0e-8)

    def test_yz_plane_mag(self):
        """ Test the Byz plane magnitude calculation."""
        # Run the clock angle routine
        omni_hro.calculate_clock_angle(self.testInst)

        # Calculate plane magnitude (expected values)
        test_mag = np.array([5.57149172, 6.14467489, 4.15098040, 5.57747612,
                             7.87633407, 5.12807787, 1.59323538, 4.10707742,
                             4.12873986, 5.78891590, 5.61652942, 3.66489971])

        # Test the difference
        test_diff = abs(test_mag - self.testInst['BYZ_GSM'])
        assert np.all(test_diff < 1.0e-8)

    def test_yz_plane_cv(self):
        """ Test the IMF steadiness CV calculation."""
        # Run the clock angle and steadiness routines
        omni_hro.calculate_clock_angle(self.testInst)
        omni_hro.calculate_imf_steadiness(self.testInst, steady_window=5,
                                          min_window_frac=0.8)

        # Ensure the BYZ coefficient of variation is calculated correctly;
        # the rolling window leaves NaN at both edges
        byz_cv = np.array([np.nan, 0.158620, 0.229267, 0.239404, 0.469371,
                           0.470944, 0.495892, 0.384522, 0.396275, 0.208209,
                           0.221267, np.nan])

        # Test the difference
        test_diff = abs(byz_cv - self.testInst['BYZ_CV'])

        assert test_diff[np.isnan(test_diff)].shape[0] == 2
        assert np.all(test_diff[~np.isnan(test_diff)] < 1.0e-6)
        assert np.all(np.isnan(self.testInst['BYZ_CV']) == np.isnan(byz_cv))

    def test_clock_angle_std(self):
        """ Test the IMF steadiness standard deviation calculation."""
        # Run the clock angle and steadiness routines
        omni_hro.calculate_clock_angle(self.testInst)
        omni_hro.calculate_imf_steadiness(self.testInst, steady_window=5,
                                          min_window_frac=0.8)

        # Expected rolling clock-angle std (NaN at the window edges)
        ca_std = np.array([np.nan, 13.317200, 14.429278, 27.278579,
                           27.468469, 25.500730, 27.673033, 27.512069,
                           19.043833, 26.616713, 29.250390, np.nan])

        # Test the difference
        test_diff = abs(ca_std - self.testInst['clock_angle_std'])

        assert test_diff[np.isnan(test_diff)].shape[0] == 2
        assert np.all(test_diff[~np.isnan(test_diff)] < 1.0e-6)
        assert np.all(np.isnan(self.testInst['clock_angle_std']) ==
                      np.isnan(ca_std))

    def test_dayside_recon(self):
        """ Test the dayside reconnection rate calculation."""
        # Run the clock angle and reconnection routines
        omni_hro.calculate_clock_angle(self.testInst)
        omni_hro.calculate_dayside_reconnection(self.testInst)

        # Expected reconnection rate values
        rcon = np.array([698.297487, 80.233896, 3.033586, 2.216075,
                         1425.310083, 486.460306, 2.350339, 103.843722,
                         0.000720, 534.586320, 1464.596772, 388.974792])

        # Test the difference
        test_diff = abs(rcon - self.testInst['recon_day'])

        assert test_diff.shape[0] == 12
        assert np.all(test_diff < 1.0e-6)
| bsd-3-clause |
jjx02230808/project0223 | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
# Clip negatives to zero so the matrix is sparse, then apply the usual
# sublinear tf scaling (1 + log) to the non-zero entries.
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
# Dense copy for the tests that compare against dense input
Xdense = X.A
def test_algorithms():
    """ARPACK and randomized solvers should give (roughly) the same result."""
    arpack = TruncatedSVD(30, algorithm="arpack")
    randomized = TruncatedSVD(30, algorithm="randomized", random_state=42)

    Xa = arpack.fit_transform(X)[:, :6]
    Xr = randomized.fit_transform(X)[:, :6]
    assert_array_almost_equal(Xa, Xr, decimal=5)

    comp_a = np.abs(arpack.components_)
    comp_r = np.abs(randomized.components_)
    # All elements are equal, but some elements are more equal than others.
    assert_array_almost_equal(comp_a[:9], comp_r[:9])
    assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
    """Fitted estimator exposes n_components and a matching components_ shape."""
    for n_components in (10, 25, 41):
        fitted = TruncatedSVD(n_components).fit(X)
        assert_equal(fitted.n_components, n_components)
        assert_equal(fitted.components_.shape, (n_components, n_features))
def test_too_many_components():
    """Requesting >= n_features components must raise ValueError."""
    for n_components in (n_features, n_features + 1):
        for algorithm in ("arpack", "randomized"):
            estimator = TruncatedSVD(n_components=n_components,
                                     algorithm=algorithm)
            assert_raises(ValueError, estimator.fit, X)
def test_sparse_formats():
    """TruncatedSVD should accept dense arrays and all common sparse formats.

    Bug fix: 'dense' was missing from the iterated tuple, so the
    ``Xdense`` branch below was unreachable and the dense input path
    was never exercised.
    """
    for fmt in ("dense", "array", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform(fit_transform(X)) should approximately recover X."""
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        # Bug fix: `algo` was previously unused, so both loop iterations
        # tested the default solver; pass it through explicitly.
        tsvd = TruncatedSVD(n_components=52, algorithm=algo, random_state=42)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input matrices are handled transparently."""
    as_int = X.astype(np.int64)
    tsvd = TruncatedSVD(n_components=6)
    transformed = tsvd.fit_transform(as_int)
    assert_equal(transformed.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
    """Check explained_variance_ratio_ across solvers, sizes and input types."""
    # Test sparse data
    svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
    X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
    X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
    X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)

    # Test dense data
    svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
    X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
    X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
    X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())

    # helper arrays for tests below
    svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
            svd_r_10_de, svd_a_20_de, svd_r_20_de)
    svds_trans = (
        (svd_a_10_sp, X_trans_a_10_sp),
        (svd_r_10_sp, X_trans_r_10_sp),
        (svd_a_20_sp, X_trans_a_20_sp),
        (svd_r_20_sp, X_trans_r_20_sp),
        (svd_a_10_de, X_trans_a_10_de),
        (svd_r_10_de, X_trans_r_10_de),
        (svd_a_20_de, X_trans_a_20_de),
        (svd_r_20_de, X_trans_r_20_de),
    )
    svds_10_v_20 = (
        (svd_a_10_sp, svd_a_20_sp),
        (svd_r_10_sp, svd_r_20_sp),
        (svd_a_10_de, svd_a_20_de),
        (svd_r_10_de, svd_r_20_de),
    )
    svds_sparse_v_dense = (
        (svd_a_10_sp, svd_a_10_de),
        (svd_a_20_sp, svd_a_20_de),
        (svd_r_10_sp, svd_r_10_de),
        (svd_r_20_sp, svd_r_20_de),
    )

    # Assert the first 10 ratios agree between 10- and 20-component fits
    for svd_10, svd_20 in svds_10_v_20:
        assert_array_almost_equal(
            svd_10.explained_variance_ratio_,
            svd_20.explained_variance_ratio_[:10],
            decimal=5,
        )

    # Assert that 20 components has higher explained variance than 10
    for svd_10, svd_20 in svds_10_v_20:
        assert_greater(
            svd_20.explained_variance_ratio_.sum(),
            svd_10.explained_variance_ratio_.sum(),
        )

    # Assert that all the values are greater than 0
    for svd in svds:
        assert_array_less(0.0, svd.explained_variance_ratio_)

    # Assert that total explained variance is less than 1
    for svd in svds:
        assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)

    # Compare sparse vs. dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)

    # Test that explained_variance is correct: ratio of per-component
    # variance in the transformed space to the total input variance
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance

        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )
| bsd-3-clause |
dboyliao/Scipy_Numpy_Learning | python_examples/scikits_412_ex1.py | 2 | 1181 | import numpy as np
import scipy.ndimage as ndimage
import skimage.morphology as morph
import matplotlib.pyplot as plt
# Generating data points with a non-uniform background
x = np.random.uniform(low=0, high=200, size=20).astype(int)
y = np.random.uniform(low=0, high=400, size=20).astype(int)

# Creating image with non-uniform background
func = lambda x, y: np.cos(x) + np.sin(y)
grid_x, grid_y = np.mgrid[0:12:200j, 0:24:400j]
bkg = func(grid_x, grid_y)
bkg = bkg / np.max(bkg)  # normalize to [.., 1]

# Creating points: delta spikes smoothed into gaussian blobs
clean = np.zeros((200, 400))
clean[(x, y)] += 5
clean = ndimage.gaussian_filter(clean, 3)
clean = clean / np.max(clean)

# Combining both the non-uniform background
# and points
fimg = bkg + clean
fimg = fimg / np.max(fimg)

# Calculating local maxima
lm1 = morph.is_local_maximum(fimg)
x1, y1 = np.where(lm1.T == True)

# Creating figure to show local maximum detection
# rate success
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
ax.imshow(fimg)
# Circle each detected maximum on top of the image
ax.scatter(x1, y1, s=100, facecolor='none', edgecolor='#009999')
ax.set_xlim(0, 400)
ax.set_ylim(0, 200)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.savefig('scikits_412_ex1.pdf', bbox_inches='tight')
| mit |
robbymeals/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
    """Retain every `step`-th feature (beginning with 0)."""

    def __init__(self, step=2):
        self.step = step

    def fit(self, X, y=None):
        """Record the number of input features; data values are ignored."""
        X = check_array(X, 'csc')
        self.n_input_feats = X.shape[1]
        return self

    def _get_support_mask(self):
        # Boolean mask selecting feature indices 0, step, 2*step, ...
        return np.arange(self.n_input_feats) % self.step == 0
# Expected support mask / indices for StepSelector(step=2) on 10 features
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
# Two samples, ten features; Xt is the selected subset, Xinv the
# inverse-transform (unselected columns zeroed)
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
# String feature names to exercise non-numeric 1d input
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
    """transform/fit_transform on dense input select the expected columns."""
    sel = StepSelector()
    assert_array_equal(Xt, sel.fit(X, y).transform(X))
    assert_array_equal(Xt, StepSelector().fit_transform(X, y))

    # Output dtype follows input dtype
    assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
    assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)

    # 1d list of strings is selected the same way
    assert_array_equal(feature_names_t,
                       sel.transform(feature_names).ravel())

    # Wrong input shape raises
    assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
    """transform/fit_transform on sparse input select the expected columns."""
    sparse = sp.csc_matrix
    sel = StepSelector()
    assert_array_equal(Xt, sel.fit(sparse(X)).transform(sparse(X)).toarray())
    assert_array_equal(Xt, sel.fit_transform(sparse(X)).toarray())

    # Output dtype follows input dtype
    assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
    assert_equal(np.float32,
                 sel.transform(sparse(X).astype(np.float32)).dtype)

    # Wrong input shape raises
    assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
    """inverse_transform on dense input: values, dtypes, errors."""
    selector = StepSelector().fit(X, y)
    assert_array_equal(Xinv, selector.inverse_transform(Xt))
    # the dtype of the input must be preserved
    for dtype in (np.int32, np.float32):
        assert_equal(dtype,
                     selector.inverse_transform(Xt.astype(dtype)).dtype)
    # a 1d list of strings round-trips with '' in the dropped slots
    assert_array_equal(feature_names_inv,
                       selector.inverse_transform(feature_names_t).ravel())
    # mismatched feature count raises
    assert_raises(ValueError, selector.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
    """inverse_transform on scipy sparse input: values, dtypes, errors."""
    to_sparse = sp.csc_matrix
    selector = StepSelector().fit(to_sparse(X))
    assert_array_equal(Xinv, selector.inverse_transform(to_sparse(Xt)).toarray())
    # the dtype of the input must be preserved
    for dtype in (np.int32, np.float32):
        assert_equal(dtype,
                     selector.inverse_transform(to_sparse(Xt).astype(dtype)).dtype)
    # mismatched feature count raises
    assert_raises(ValueError, selector.inverse_transform, np.array([[1], [2]]))
def test_get_support():
    """get_support returns either the boolean mask or the selected indices."""
    selector = StepSelector().fit(X, y)
    assert_array_equal(support, selector.get_support())
    assert_array_equal(support_inds, selector.get_support(indices=True))
| bsd-3-clause |
alexandrovteam/curatr | mcf_standard_browser/standards_review/tasks.py | 1 | 15528 | import logging
import sys
import traceback
import dateutil
import pandas as pd
import pymzml
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from standards_review.models import Adduct, FragmentationSpectrum, Xic, LcInfo, MsInfo, InstrumentInfo
from standards_review.models import Molecule, Standard, ProcessingError
from django.conf import settings
from standards_review.tools import DatabaseLogHandler
import os
import datetime
import numpy as np
@shared_task
def add_batch_standard(metadata, csv_file):
    """
    Handle a tab-separated csv file of standards, creating or updating one
    Molecule and one Standard per row.
    header line should be "mcfid","name","formula", "inchi", "solubility", "vendor","vendor_id", "hmdb_id" , "chebi_id", "lipidmaps_id", "cas_id", "pubchem_id". "date","location","lot_num"
    To Be Set:
    ### Standard
    # mandatory
    molecule = models.ForeignKey(Molecule, default=Molecule.objects.all().filter(name='DUMMY'))
    MCFID = models.IntegerField(null=True, blank=True)# if blank MCFID == Standard.pk
    # optional
    vendor = models.TextField(null=True, blank=True)
    vendor_cat = models.TextField(null=True, blank=True)
    lot_num = models.TextField(null=True, blank=True)
    location = models.TextField(null=True, blank=True)
    purchase_date = models.DateField(null=True, blank=True)
    If Not Existing:
    ### Molecule
    # mandatory
    name = models.TextField(default = "")
    sum_formula = models.TextField(null=True)
    pubchem_id = models.TextField(null=True, blank=True)
    # Optional
    inchi_code = models.TextField(default="")
    exact_mass = models.FloatField(default=0.0)
    solubility = models.TextField(null=True, blank=True)
    # External reference numbers
    hmdb_id = models.TextField(null=True, blank=True)
    chebi_id = models.TextField(null=True, blank=True)
    lipidmaps_id = models.TextField(null=True, blank=True)
    cas_id = models.TextField(null=True, blank=True)

    :param metadata: upload metadata dict (NOTE(review): currently unused here)
    :param csv_file: path or buffer of the tab-separated upload
    :return: list of [name, exception] pairs for the rows that failed
    """
    error_list = []
    df = pd.read_csv(csv_file, sep="\t")
    logging.info('I read the file')
    # normalize header names: spaces -> underscores, lowercase
    df.columns = [x.replace(" ", "_").lower() for x in df.columns]
    logging.info("I replaced columns")
    # Fill missing values; astype(str) turns a missing id into the literal
    # string 'nan', which is checked for explicitly below
    df['id'] = df['id'].astype(str)
    df.fillna("", inplace=True)
    logging.info("Shape: {}".format(df.shape))
    for row in df.iterrows():
        logging.info("row: {}".format(row))
        try:
            # clean up input
            entry = row[1]
            if entry['formula'] == '':
                raise ValueError('sum formula cannot be blank')
            # for tag in entry.keys():
            #     if entry[tag] != "":
            #         entry[tag] = entry[tag].encode("utf8") # make strings safe
            # prefer PubChem CID as the lookup key; fall back to a
            # case-insensitive name match
            if entry['pubchem_id'] != "":
                molecule = Molecule.objects.all().filter(pubchem_id=entry['pubchem_id'])
            else:
                molecule = Molecule.objects.all().filter(name__iexact=entry['name']) # filter lowercase
            if molecule.exists():
                molecule = molecule[0]
            else:
                # unknown molecule: create and persist it before linking
                molecule = Molecule(
                    name=entry["name"],
                    sum_formula=entry["formula"],
                    inchi_code=entry["inchi"],
                    solubility=entry["solubility"],
                    hmdb_id=entry["hmdb_id"],
                    chebi_id=entry["chebi_id"],
                    lipidmaps_id=entry["lipidmaps_id"],
                    cas_id=entry["cas_id"],
                    pubchem_id=entry["pubchem_id"],
                )
                logging.info("about to save " + molecule.name)
                logging.info(molecule)
                molecule.save()
                logging.info("Successfully saved " + molecule.name)
            # 'nan' means the row had no inventory id (see astype(str) above)
            if entry['id'] == 'nan':
                s = Standard(molecule=molecule)
            else:
                s = Standard.objects.all().filter(inventory_id=entry['id'])
                if s.exists(): # standard already added, overwrite
                    s = s[0]
                    s.molecule=molecule
                else:
                    s = Standard(molecule=molecule)
            s.vendor = entry["vendor"]
            s.vendor_cat = entry["vendor_id"]
            s.save()
        # NOTE(review): bare except deliberately records any per-row failure
        # and keeps processing the remaining rows
        except:
            error_list.append([entry['name'], sys.exc_info()[1]])
            logging.warning("Failed for: {} with {}".format(entry['name'], sys.exc_info()[1]))
    return error_list
@shared_task
def handle_uploaded_files(metadata, mzml_filepath, d):
    """Parse an uploaded mzML file and populate dataset `d` with XICs and
    fragmentation (MS/MS) spectra for the selected standards/adducts.

    :param metadata: dict of upload form values (mass_accuracy_ppm,
        quad_window_mz, ionization_method, ion_analyzer, standard/adduct pks,
        optional lc/ms/instrument info tag strings)
    :param mzml_filepath: path to the uploaded mzML file
    :param d: Dataset model instance to attach results (and logs) to
    :return: True; failures are stored as a ProcessingError row, not raised
    """
    print(__file__ + str(d.id))
    # per-dataset logger that also persists log lines to the database
    logger = logging.getLogger(__file__ + str(d.id))
    logger.addHandler(DatabaseLogHandler(d, level=logging.DEBUG))
    logger.debug("adding dataset {}".format(mzml_filepath))
    try:
        msrun = pymzml.run.Reader(mzml_filepath)
        ppm = float(metadata['mass_accuracy_ppm'])
        mz_tol_quad = float(metadata['quad_window_mz'])
        ionization_method = metadata['ionization_method']
        ion_analyzer = metadata['ion_analyzer']
        scan_time = []
        standards = Standard.objects.all().filter(pk__in=metadata['standards'])
        adducts = Adduct.objects.all().filter(pk__in=metadata['adducts'])
        # precompute the ppm-based m/z extraction window per (standard, adduct)
        mz_upper = {}
        mz_lower = {}
        mz = {}
        logger.debug(standards.count())
        for standard in standards:
            mz_upper[standard] = {}
            mz_lower[standard] = {}
            mz[standard] = {}
            for adduct in adducts:
                mz[standard][adduct] = standard.molecule.get_mz(adduct)
                logger.debug({'standard': standard})
                logger.debug({'mz': mz[standard][adduct]})
                delta_mz = mz[standard][adduct] * ppm * 1e-6
                mz_upper[standard][adduct] = mz[standard][adduct] + delta_mz
                mz_lower[standard][adduct] = mz[standard][adduct] - delta_mz
        logger.debug('adding dataset')
        # instrument metadata is optional; missing keys fall back to ''
        try:
            lc_info = metadata['lc_info']
            ms_info = metadata['ms_info']
            instrument_info = metadata['instrument_info']
        except LookupError:
            logger.debug('no instrument information supplied; using empty string instead')
            lc_info = ms_info = instrument_info = ''
        # split the comma-separated tag strings into de-duplicated sets
        lc_info_stripped = set()
        ms_info_stripped = set()
        instr_info_stripped = set()
        tag_sets = (lc_info_stripped, ms_info_stripped, instr_info_stripped)
        for tag_str, tag_set in zip([lc_info, ms_info, instrument_info], tag_sets):
            for tag in tag_str.split(', '):
                if tag:
                    tag_stripped = tag.replace(',', '').strip()
                    tag_set.add(tag_stripped)
        # attach each tag (created on demand) to the matching m2m field of d
        for tag_set, TagClass, attrname in zip(tag_sets, [LcInfo, MsInfo, InstrumentInfo],
                                               ['lc_info', 'ms_info', 'instrument_info']):
            for tag in tag_set:
                tag_obj = TagClass.objects.get_or_create(content=tag)[0]
                if tag_obj not in getattr(d, attrname).all():
                    getattr(d, attrname).add(tag_obj)
        d.ionization_method = ionization_method
        d.ion_analyzer = ion_analyzer
        d.mass_accuracy_ppm = ppm
        d.save()
        for standard in standards:
            d.standards_present.add(standard)
        for adduct in adducts:
            d.adducts_present.add(adduct)
        d.save()
        logger.debug('adding msms')
        xics = {}
        spec_n = 0
        for spectrum in msrun:
            spec_n += 1
            if spectrum['ms level'] == 1:
                scan_time.append(spectrum['scan start time'])
                # kept so MS2 handling can look back at the preceding survey scan
                last_ms1 = spectrum
            # Iterate adducts/standards and get values as required
            for standard in standards:
                if standard not in xics:
                    xics[standard] = {}
                for adduct in adducts:
                    if adduct not in xics[standard]:
                        xics[standard][adduct] = []
                    if spectrum['ms level'] == 1:
                        # sum the centroided intensity inside the ppm window
                        x = 0
                        for m, i in spectrum.peaks('centroided'):
                            if all([m >= mz_lower[standard][adduct], m <= mz_upper[standard][adduct]]):
                                x += i
                        xics[standard][adduct].append(x)
                    if spectrum['ms level'] == 2:
                        add_msms = False
                        pre_mz, pre_int = spectrum.selected_precursors[0]
                        mz_tol_this_adduct = mz[standard][adduct] * ppm * 1e-6
                        if any((abs(pre_mz - mz[standard][adduct]) <= mz_tol_this_adduct,
                                abs(pre_mz - mz[standard][adduct]) <= mz_tol_quad)):  # frag spectrum probably the target
                            add_msms = True
                        if add_msms:
                            if xics[standard][adduct]:
                                # NOTE(review): logging.debug is given two positional
                                # values here; the stdlib treats the second as a
                                # %-format arg — confirm intended
                                logging.debug(xics[standard][adduct][-1], pre_int)
                                # missing precursor intensity: fall back to the
                                # latest XIC reading
                                if not pre_int:
                                    pre_int = xics[standard][adduct][-1]
                                mzs = last_ms1.mz
                                ints = last_ms1.i
                                # intensity inside the quad isolation window vs
                                # inside the (narrower) ppm window of the target
                                quad_ints = [ii for m, ii in zip(mzs, ints) if
                                             all((m >= pre_mz - mz_tol_quad, m <= pre_mz + mz_tol_quad))]
                                ppm_ints = [ii for m, ii in zip(mzs, ints) if
                                            all((m >= pre_mz - mz_tol_this_adduct, m <= pre_mz + mz_tol_this_adduct))]
                                quad_ints_sum = sum(quad_ints)
                                ppm_ints_sum = sum(ppm_ints)
                                if ppm_ints_sum == 0:
                                    pre_fraction = 0
                                else:
                                    pre_fraction = ppm_ints_sum / quad_ints_sum
                            else:
                                # no XIC yet: flag intensity as unknown
                                pre_int = -1
                                pre_fraction = 0
                            # collision energy extraction; ce_type/ce_gas are set
                            # but only ce_str ends up stored
                            ce_type = ''
                            ce_energy = ''
                            ce_gas = ''
                            tfs = spectrum.element.find(
                                "./{ns}cvParam[@accession='MS:1000512']".format(
                                    ns=spectrum.ns
                                )
                            )  # "MS:1000512" = Thermo filter string
                            if tfs:
                                ce_str = tfs.get('value').split('@')[1].split('[')[0]
                            else:
                                # non-Thermo files: read the activation element
                                pcList = spectrum.element.find(spectrum.ns+"precursorList")
                                activation = pcList.getchildren()[0].find(spectrum.ns+"activation")
                                ce_type = activation.getchildren()[0].attrib['name']
                                ce_energy = dict(activation.getchildren()[1].items())
                                #for element in spectrum.xmlTree:
                                #    if element.get('accession') == "MS:1000133":
                                #        ce_type = element.items()
                                #    elif element.get('accession') == "MS:1000045":
                                #        ce_energy = dict(element.items())
                                ce_str = "{} {} {}".format(ce_energy['name'], ce_energy['value'], ce_energy['unitName'])
                            f = FragmentationSpectrum(precursor_mz=pre_mz,
                                                      rt=spectrum['scan start time'], dataset=d, spec_num=spec_n,
                                                      precursor_quad_fraction=pre_fraction, ms1_intensity=pre_int,
                                                      collision_energy=ce_str)
                            f.set_centroid_mzs(spectrum.mz)
                            f.set_centroid_ints(spectrum.i)
                            f.collision = ce_str
                            f.save()
        logger.debug("adding xics")
        # persist one Xic row per (standard, adduct) pair
        for standard in standards:
            for adduct in adducts:
                # if np.sum(xics[standard][adduct]) > 0:
                x = Xic(mz=standard.molecule.get_mz(adduct), dataset=d)
                x.set_xic(xics[standard][adduct])
                x.set_rt(scan_time)
                x.standard = standard
                x.adduct = adduct
                x.save()
    except Exception as e:
        # any failure is recorded against the dataset instead of propagating
        exc_info = sys.exc_info()
        message = "Dataset processing failed. Here's what we know, check the log for more details. {}".format(e)
        p = ProcessingError(dataset=d, message=message)
        p.save()
        logger.error(e)
        logger.error(traceback.print_exception(*exc_info))
        del exc_info
    # mark the dataset done whether processing succeeded or failed
    d.processing_finished = True
    d.save()
    logger.debug('done')
    logger.debug("added = True")
    return True
@periodic_task(run_every=crontab(minute=0, hour=4))  # every day at 4am
def scrape_pubchem_for_inchi():
    """Backfill missing InChI codes from PubChem for molecules with a CID."""
    from pubchempy import BadRequestError, Compound, NotFoundError
    molecules = Molecule.objects.filter(pubchem_id__isnull=False)
    for molecule in molecules.order_by('pubchem_id'):
        if molecule.inchi_code != "":
            continue  # already populated, nothing to fetch
        try:
            compound = Compound.from_cid(molecule.pubchem_id)
            molecule.inchi_code = compound.inchi
            molecule.save()
        except (BadRequestError, NotFoundError):
            logging.error('Invalid PubChem CID: {}'.format(molecule.pubchem_id))
@periodic_task(run_every=crontab(minute=0, hour=1, day_of_week=1))  # every week at 1am
def remove_old_spectra():
    """
    Deletes unreviewed spectra from the database once they are older than
    settings.SPECTRA_LIFETIME (in weeks). A falsy SPECTRA_LIFETIME disables
    the clean-up entirely.
    """
    logging.debug('running tidy old')
    if not settings.SPECTRA_LIFETIME:
        return
    lifetime = datetime.timedelta(weeks=settings.SPECTRA_LIFETIME)
    cutoff = (datetime.datetime.now() - lifetime).date()
    logging.debug(('deleting spectra before', cutoff))
    stale = FragmentationSpectrum.objects.filter(reviewed=False).filter(date_added__lt=cutoff)
    logging.debug(('number spectra to delete:', stale.count()))
    stale.delete()
@periodic_task(run_every=crontab(minute=0, hour=0))  # every day at midnight
def periodic_export():
    """Nightly export of all reviewed, standard-linked spectra.

    Writes one archive per export format (each ``write_*`` function in the
    ``export`` module) for all polarities combined and for positive/negative
    subsets separately.
    """
    from . import export
    for polarity in [None, 'positive', 'negative']:
        spectra = FragmentationSpectrum.objects.all().filter(reviewed=True).exclude(standard=None)
        if polarity:
            if polarity == 'positive':
                spectra = spectra.exclude(adduct__charge__lte=0)
            elif polarity == 'negative':
                spectra = spectra.exclude(adduct__charge__gte=0)
            else:
                raise ValueError("value of polarity not valid {}".format(polarity))
        # pair each spectrum with (mz, intensity, intensity rescaled to 0-999)
        spec_pairs = [[spectrum, zip(spectrum.centroid_mzs, spectrum.centroid_ints,
                                     (999 / (np.max(spectrum.centroid_ints)) * spectrum.centroid_ints).astype(int))] for
                      spectrum in spectra]
        for fmt in [at for at in dir(export) if at.startswith('write')]:
            fmt_name = fmt.replace("write_", "")
            d = datetime.datetime.now()
            # NOTE(review): "%y%d%m" orders day before month — confirm this
            # file-naming scheme is intentional (vs "%y%m%d")
            fn = os.path.join(settings.MEDIA_ROOT, d.strftime("%y%d%m_{}.zip".format(fmt_name)))
            if polarity:
                fn = fn.replace(fmt_name, fmt_name + polarity)
            getattr(export, fmt)(fn, spec_pairs)
| apache-2.0 |
madjelan/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
    """Create a temporary data home with an 'mldata' subfolder for doctests.

    The path is kept in the module-global ``custom_data_home`` (so the
    teardown can remove it) and injected into the doctest namespace.
    """
    global custom_data_home
    custom_data_home = tempfile.mkdtemp()
    mldata_dir = join(custom_data_home, 'mldata')
    makedirs(mldata_dir)
    globs['custom_data_home'] = custom_data_home
    return globs
def setup_module():
    """Install the mldata mock so the doctests never hit mldata.org."""
    # synthetic payloads shaped like the real downloads, but empty
    mock_datasets = {
        'mnist-original': {
            'data': np.empty((70000, 784)),
            'label': np.repeat(np.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': np.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_module():
    # Undo setup_module/globs: restore the real mldata fetcher and delete
    # the temporary data folder created by globs().
    uninstall_mldata_mock()
    shutil.rmtree(custom_data_home)
| bsd-3-clause |
realisticus/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
    """Plot nominal GDP over time as a single green line."""
    gdp_by_year = [
        (1950, 300.2), (1960, 543.3), (1970, 1075.9), (1980, 2862.5),
        (1990, 5979.6), (2000, 10289.7), (2010, 14958.3),
    ]
    years = [year for year, _ in gdp_by_year]
    gdp = [value for _, value in gdp_by_year]
    # line chart: years on the x-axis, gdp on the y-axis
    plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
    plt.title("Nominal GDP")
    plt.ylabel("Billions of $")
    plt.show()
def make_chart_simple_bar_chart(plt):
    """Bar chart of Academy Award counts for a handful of movies."""
    movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
    num_oscars = [5, 11, 3, 8, 10]
    # default bar width is 0.8, so shifting the left edge by 0.1 centers
    # each bar on its integer slot
    left_edges = [index + 0.1 for index in range(len(movies))]
    plt.bar(left_edges, num_oscars)
    plt.ylabel("# of Academy Awards")
    plt.title("My Favorite Movies")
    # put the movie names under the middle of each bar
    plt.xticks([index + 0.5 for index in range(len(movies))], movies)
    plt.show()
def make_chart_histogram(plt):
    """Histogram of exam grades, bucketed into deciles."""
    grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]

    def decile(grade):
        # 87 -> 80, 100 -> 100, 0 -> 0
        return grade // 10 * 10

    histogram = Counter(decile(grade) for grade in grades)
    # center each width-8 bar over its decile
    plt.bar([bucket - 4 for bucket in histogram.keys()],
            histogram.values(),
            8)
    # x-axis from -5 to 105, y-axis from 0 to 5
    plt.axis([-5, 105, 0, 5])
    plt.xticks([10 * i for i in range(11)])  # labels at 0, 10, ..., 100
    plt.xlabel("Decile")
    plt.ylabel("# of Students")
    plt.title("Distribution of Exam 1 Grades")
    plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
    """Show the same two-bar chart with an honest or a truncated y-axis."""
    mentions = [500, 505]
    years = [2013, 2014]
    plt.bar([2012.6, 2013.6], mentions, 0.8)
    plt.xticks(years)
    plt.ylabel("# of times I heard someone say 'data science'")
    # without this, matplotlib labels the x-axis 0, 1 and tucks a
    # "+2.013e3" offset in the corner (bad matplotlib!)
    plt.ticklabel_format(useOffset=False)
    if mislead:
        # only show the sliver above 500, exaggerating the change
        window = [2012.5, 2014.5, 499, 506]
        title = "Look at the 'Huge' Increase!"
    else:
        window = [2012.5, 2014.5, 0, 550]
        title = "Not So Huge Anymore."
    plt.axis(window)
    plt.title(title)
    plt.show()
def make_chart_several_line_charts(plt):
    """Plot variance, bias^2 and their sum on one shared set of x positions."""
    variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
    bias_squared = list(reversed(variance))  # [256, 128, ..., 1]
    total_error = [v + b for v, b in zip(variance, bias_squared)]
    xs = range(len(variance))
    series = [
        (variance, 'g-', 'variance'),        # green solid line
        (bias_squared, 'r-.', 'bias^2'),     # red dot-dashed line
        (total_error, 'b:', 'total error'),  # blue dotted line
    ]
    for ys, fmt, label in series:
        plt.plot(xs, ys, fmt, label=label)
    # labels were assigned above, so the legend comes for free;
    # loc=9 means "top center"
    plt.legend(loc=9)
    plt.xlabel("model complexity")
    plt.title("The Bias-Variance Tradeoff")
    plt.show()
def make_chart_scatter_plot(plt):
    """Scatter of friend count vs. minutes on site, one labelled point each."""
    friends = [70, 65, 72, 63, 71, 64, 60, 64, 67]
    minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
    labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
    plt.scatter(friends, minutes)
    # annotate each point with its label, nudged slightly off the marker
    for label, friend_count, minute_count in zip(labels, friends, minutes):
        plt.annotate(label,
                     xy=(friend_count, minute_count),
                     xytext=(5, -5),
                     textcoords='offset points')
    plt.title("Daily Minutes vs. Number of Friends")
    plt.xlabel("# of friends")
    plt.ylabel("daily minutes spent on the site")
    plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
    """Scatter of two test scores, optionally forcing comparable axes."""
    scores_test_1 = [99, 90, 85, 97, 80]
    scores_test_2 = [100, 85, 60, 90, 70]
    plt.scatter(scores_test_1, scores_test_2)
    plt.xlabel("test 1 grade")
    plt.ylabel("test 2 grade")
    if equal_axes:
        plt.title("Axes Are Comparable")
        plt.axis("equal")
    else:
        plt.title("Axes Aren't Comparable")
    plt.show()
def make_chart_pie_chart(plt):
    """A two-slice pie chart (with a small joke about pie charts)."""
    slices = [0.95, 0.05]
    slice_labels = ["Uses pie charts", "Knows better"]
    plt.pie(slices, labels=slice_labels)
    # keep the aspect ratio square so the pie is a circle, not an oval
    plt.axis("equal")
    plt.show()
if __name__ == "__main__":
    # Render every demo chart in sequence (each call blocks on plt.show()).
    # NOTE(review): make_chart_scatter_plot is defined above but never
    # called here — confirm whether that omission is intentional.
    make_chart_simple_line_chart(plt)
    make_chart_simple_bar_chart(plt)
    make_chart_histogram(plt)
    make_chart_misleading_y_axis(plt, mislead=True)
    make_chart_misleading_y_axis(plt, mislead=False)
    make_chart_several_line_charts(plt)
    make_chart_scatterplot_axes(plt, equal_axes=False)
    make_chart_scatterplot_axes(plt, equal_axes=True)
    make_chart_pie_chart(plt)
| unlicense |
fdion/stemgraphic | stemgraphic/alpha.py | 1 | 95126 | # -*- coding: utf-8 -*-
""" stemgraphic.alpha.
BRAND NEW in V.0.5.0!
Stemgraphic provides a complete set of functions to handle everything related to stem-and-leaf plots. alpha is a
module of the stemgraphic package to add support for categorical and text variables.
The module also adds functionality to handle whole words, beside stem-and-leaf bigrams and n-grams.
For example, for the word "alabaster":
With word_ functions, we can look at the word frequency in a text, or compare it through a distance function
(default to Levenshtein) to other words in a corpus
With stem_ functions, we can look at the fundamental stem-and-leaf, stem would be 'a' and leaf would be 'l', for
a bigram 'al'. With a stem_order of 1 and a leaf_order of 2, we would have 'a' and 'la', for a trigram 'ala', so
on and so forth.
"""
from math import radians
import re
import unicodedata
from urllib.request import urlopen
from warnings import warn
try:
import Levenshtein
except ImportError:
Levenshtein = None
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as mticker
import numpy as np
import pandas as pd
import seaborn as sns
from .helpers import stack_columns, CHAR_FILTER, LETTERS, NON_ALPHA
def add_missing_letters(mat, stem_order, leaf_order, letters=None):
    """ Add missing stems based on LETTERS. defaults to a-z alphabet.

    Inserts a NaN-filled column for every letter of `letters` absent from
    `mat`, keeping the column order of `letters`. Works in place on `mat`
    (the caller's dataframe gains the columns) and also returns it.

    :param mat: matrix to modify
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param leaf_order: how many leaf characters per data point to display, defaults to 1
    :param letters: letters that must be present as stems
    :return: the modified matrix
    """
    if letters is None:
        letters = LETTERS
    if stem_order > 1 or leaf_order > 1:
        # No change, can't force every missing leaf bigram
        return mat
    # start at -1 so a missing FIRST letter is inserted at column 0
    # (the previous initial value of 0 put it at index 1)
    previous_col = -1
    for letter in letters:
        if letter not in mat.columns:
            # np.nan: the np.NaN alias was removed in NumPy 2.0
            mat.insert(previous_col + 1, letter, np.nan)
            previous_col += 1
        else:
            previous_col = list(mat.columns.values).index(letter)
    return mat
# noinspection PyPep8Naming
def heatmap(src, alpha_only=False, annotate=False, asFigure=False, ax=None, caps=False, compact=True,  # NOQA
            display=None, interactive=True, leaf_order=1, leaf_skip=0, random_state=None, stem_order=1,
            stem_skip=0, stop_words=None):
    """ The heatmap displays the same underlying data as the stem-and-leaf plot, but instead of stacking the leaves,
    they are left in their respective columns. Row 'a' and Column 'b' would have the count of words starting
    with 'ab'. The heatmap is useful to look at patterns. For distribution, stem\_graphic is better suited.

    :param src: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet
    :param annotate: display annotations (Z) on heatmap
    :param asFigure: return plot as plotly figure (for web applications)
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param caps: bool, True to be case sensitive
    :param compact: remove empty stems
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param leaf_order: how many leaf characters per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :return: (alpha_matrix, ax) tuple; ax stays None when the interactive plotly path rendered the figure
    """
    # build the stem x leaf count matrix; row/xic results are not needed here
    _, alpha_matrix, _ = ngram_data(
        src,
        alpha_only=alpha_only,
        caps=caps,
        compact=compact,
        display=display,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        rows_only=False,
        random_state=random_state,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words
    )
    if not compact:
        # pad with empty columns so every letter shows up on the axis
        alpha_matrix.word = add_missing_letters(alpha_matrix.word, stem_order, leaf_order)
    if isinstance(src, str):
        title = 'stem-and-leaf heatmap for {}'.format(src)
    else:
        title = 'stem-and-leaf heatmap'
    if interactive:
        # .iplot only exists when cufflinks is loaded; otherwise fall back
        # to a static seaborn heatmap
        try:
            fig = alpha_matrix.word.T.iplot(kind='heatmap', asFigure=asFigure, title=title)
        except AttributeError:
            if ax is None:
                fig, ax = plt.subplots(figsize=(20, 16))
            ax.set_title(title)
            sns.heatmap(alpha_matrix.word, annot=annotate, ax=ax)
    else:
        if ax is None:
            fig, ax = plt.subplots(figsize=(20, 16))
        ax.set_title(title)
        sns.heatmap(alpha_matrix.word, annot=annotate, ax=ax)
    return alpha_matrix, ax
# noinspection PyUnboundLocalVariable
def heatmap_grid(src1, src2, src3=None, src4=None, alpha_only=True, annot=False, caps=False, center=0, cmap=None,
                 display=1000, leaf_order=1, leaf_skip=0, random_state=None, reverse=False, robust=False, stem_order=1,
                 stem_skip=0, stop_words=None, threshold=0):
    """ heatmap_grid.

    With stem_graphic, it is possible to directly compare two different sources. In the case of a heatmap,
    two different data sets cannot be visualized directly on a single heatmap. For this task, we designed
    heatmap_grid to adapt to the number of sources to build a layout. It can take from 2 to 4 different source.

    With 2 sources, a square grid will be generated, allowing for horizontal and vertical comparisons,
    with an extra heatmap showing the difference between the two matrices. It also computes a norm for that
    difference matrix. The smaller the value, the closer the two heatmaps are.

    With 3 sources, it builds a triangular grid, with each source heatmap in a corner and the difference between
    each pair in between.

    Finally, with 4 sources, a 3 x 3 grid is built, each source in a corner and the
    difference between each pair in between, with the center expressing the difference between top left
    and bottom right diagonal.

    :param src1: string, filename, url, list, numpy array, time series, pandas or dask dataframe (required)
    :param src2: string, filename, url, list, numpy array, time series, pandas or dask dataframe (required)
    :param src3: string, filename, url, list, numpy array, time series, pandas or dask dataframe (optional)
    :param src4: string, filename, url, list, numpy array, time series, pandas or dask dataframe (optional)
    :param alpha_only: only use stems from a-z alphabet
    :param annot: display annotations (Z) on heatmap
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.
    :param center: the center of the divergent color map for the difference heatmaps
    :param cmap: color map for difference heatmap or None (default) to use the builtin red / blue divergent map
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param leaf_order: how many leaf characters per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param reverse: forwarded to ngram_data (see ngram_data for semantics)
    :param robust: reduce effect of outliers on difference heatmap
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param threshold: absolute value minimum count difference for a difference heatmap element to be visible
    :return: the matplotlib figure holding the grid
    """
    # --- build one count matrix per source (letters padded so shapes match)
    res1, alpha1, x1 = ngram_data(src1, alpha_only=alpha_only, display=display, stem_order=stem_order,
                                  leaf_order=leaf_order, leaf_skip=leaf_skip, random_state=random_state, rows_only=False,
                                  stop_words=stop_words, reverse=reverse,
                                  caps=caps)
    res2, alpha2, x2 = ngram_data(src2, alpha_only=alpha_only, display=display, stem_order=stem_order,
                                  leaf_order=leaf_order, leaf_skip=leaf_skip, random_state=random_state, rows_only=False,
                                  stop_words=stop_words, reverse=reverse,
                                  caps=caps)
    alpha1 = add_missing_letters(alpha1.word, stem_order, leaf_order)
    alpha2 = add_missing_letters(alpha2.word, stem_order, leaf_order)
    if src3 is not None:
        res3, alpha3, x3 = ngram_data(src3, alpha_only=alpha_only, display=display, stem_order=stem_order,
                                      leaf_order=leaf_order, leaf_skip=leaf_skip, random_state=random_state, rows_only=False,
                                      stop_words=stop_words, caps=caps, reverse=reverse)
        alpha3 = add_missing_letters(alpha3.word, stem_order, leaf_order)
    if src4 is not None:
        res4, alpha4, x4 = ngram_data(src4, alpha_only=alpha_only, display=display, stem_order=stem_order,
                                      leaf_order=leaf_order, leaf_skip=leaf_skip, random_state=random_state, rows_only=False,
                                      stop_words=stop_words, caps=caps, reverse=reverse)
        alpha4 = add_missing_letters(alpha4.word, stem_order, leaf_order)
    # --- pairwise difference matrices; min/max shared across source heatmaps
    diff1, norm1, ratio1 = matrix_difference(alpha1, alpha2, thresh=threshold)
    mvmin = alpha1.min().min()
    mvmax = alpha1.max().max()
    if src3:
        # noinspection PyUnboundLocalVariable
        diff2, norm2, ratio2 = matrix_difference(alpha1, alpha3, thresh=threshold)
        diff3, norm3, ratio3 = matrix_difference(alpha2, alpha3, thresh=threshold)
        if src4:
            # noinspection PyUnboundLocalVariable
            diff4, norm4, ratio4 = matrix_difference(alpha2, alpha4, thresh=threshold)
            diff5, norm5, ratio5 = matrix_difference(alpha3, alpha4, thresh=threshold)
            diff6, norm6, ratio6 = matrix_difference(alpha1, alpha4, thresh=threshold)
    if cmap is None:
        cmap = sns.diverging_palette(220, 10, as_cmap=True)
    if center is not None:
        # re-center the divergent color map on `center`, using either robust
        # percentiles or the raw extremes of the first difference matrix
        data = diff1
        calc_data = data[~np.isnan(data)]
        vmin = np.percentile(calc_data, 2) if robust else calc_data.min().min()
        vmax = np.percentile(calc_data, 98) if robust else calc_data.max().max()
        vrange = max(vmax - center, center - vmin)
        normalize = mpl.colors.Normalize(center - vrange, center + vrange)
        cmin, cmax = normalize([vmin, vmax])
        cc = np.linspace(cmin, cmax, 256)
        cmap = mpl.colors.ListedColormap(cmap(cc))
    # --- layout: 2x2 for two sources, 3x3 otherwise
    if src3 is None and src4 is None:
        fig, ((ax1, ax3), (ax4, ax2)) = plt.subplots(2, 2, figsize=(20, 16))
        sns.heatmap(alpha1, annot=annot, ax=ax1, vmin=mvmin, vmax=mvmax, square=True)
    else:
        fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(3, 3, figsize=(20, 20))
        sns.heatmap(alpha1, annot=annot, ax=ax1, vmin=mvmin, vmax=mvmax, square=True)
    ax1.set_title(src1)
    # arrows ('->') indicate which pair of corners each difference panel links
    if src3:
        ax1.text(1.3, 0.7, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation='horizontal',
                 transform=ax1.transAxes)
        ax1.text(0.3, -0.1, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation=-90,
                 transform=ax1.transAxes)
        if src4:
            ax1.text(1.3, -0.1, '->',
                     size=12,
                     horizontalalignment='center',
                     verticalalignment='center',
                     rotation=-45,
                     transform=ax1.transAxes)
            # noinspection PyUnboundLocalVariable
            ax5.text(1.3, -0.1, '->',
                     size=12,
                     horizontalalignment='center',
                     verticalalignment='center',
                     rotation=-45,
                     transform=ax5.transAxes)
    ax2.set_title('changes ({})'.format(norm1 / ratio1))
    # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
    sns.heatmap(diff1, annot=True if norm1 < 100 else False, ax=ax2, vmin=vmin, vmax=vmax, cmap=cmap, square=True)
    ax2.text(1.3, 0.7, '->',
             size=12,
             horizontalalignment='center',
             verticalalignment='center',
             rotation='horizontal',
             transform=ax2.transAxes)
    ax3.set_title(src2)
    sns.heatmap(alpha2, ax=ax3, vmin=mvmin, vmax=mvmax, square=True)
    if src3:
        # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
        ax4.set_title('changes ({})'.format(norm2 / ratio2))
        # noinspection PyUnboundLocalVariable
        sns.heatmap(diff2, annot=True if norm2 < 100 else False, ax=ax4, vmin=vmin, vmax=vmax, cmap=cmap, square=True)
        ax4.text(0.3, -0.1, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation=-90,
                 transform=ax4.transAxes)
        # noinspection PyUnboundLocalVariable
        ax7.set_title(src3)
        sns.heatmap(alpha3, annot=annot, ax=ax7, vmin=mvmin, vmax=mvmax)
        if src4:
            # center panel: difference along the top-left / bottom-right diagonal
            # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
            ax5.set_title('changes ({})'.format(norm6 / ratio6))
            # noinspection PyUnboundLocalVariable
            sns.heatmap(diff6, annot=True if norm6 < 100 else False, ax=ax5, vmin=vmin, vmax=vmax, cmap=cmap,
                        square=True)
        else:
            # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
            ax5.set_title('changes ({})'.format(norm3 / ratio3))
            # noinspection PyUnboundLocalVariable
            sns.heatmap(diff3, annot=True if norm3 < 100 else False, ax=ax5, vmin=vmin, vmax=vmax, cmap=cmap,
                        square=True)
    else:
        ax4.set_title(src2)
        sns.heatmap(alpha2, ax=ax4, vmin=mvmin, vmax=mvmax, square=True)
    if src4:
        ax3.text(0.7, -0.1, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation=-90,
                 transform=ax3.transAxes)
        # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable,PyUnboundLocalVariable
        ax6.set_title('changes ({})'.format(norm4 / ratio4))
        # noinspection PyUnboundLocalVariable
        sns.heatmap(diff4, annot=True if norm4 < 100 else False, ax=ax6, vmin=vmin, vmax=vmax, cmap=cmap, square=True)
        ax6.text(0.7, -0.1, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation=-90,
                 transform=ax6.transAxes)
        ax7.text(1.3, 0.3, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation='horizontal',
                 transform=ax7.transAxes)
        # noinspection PyUnboundLocalVariable
        ax8.text(1.3, 0.3, '->',
                 size=12,
                 horizontalalignment='center',
                 verticalalignment='center',
                 rotation='horizontal',
                 transform=ax8.transAxes)
        # noinspection PyUnboundLocalVariable
        ax9.set_title(src4)
        sns.heatmap(alpha4, annot=annot, ax=ax9, vmin=mvmin, vmax=mvmax)
        # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
        ax8.set_title('changes ({})'.format(norm5 / ratio5))
        # noinspection PyUnboundLocalVariable
        sns.heatmap(diff5, annot=True if norm5 < 100 else False, ax=ax8, vmin=vmin, vmax=vmax, cmap=cmap, square=True)
    elif src3:
        # three sources: blank the unused panels of the 3x3 grid
        # noinspection PyUnboundLocalVariable
        ax6.axis('off')
        # noinspection PyUnboundLocalVariable
        ax8.axis('off')
        # noinspection PyUnboundLocalVariable
        ax9.axis('off')
    return fig
def matrix_difference(mat1, mat2, thresh=0, ord=None):
    """Compute the element-wise difference between two heatmap matrices.

    The second matrix is scaled by the ratio of total counts so the two are
    comparable, and small differences (within +/- thresh) are blanked out.

    :param mat1: first heatmap dataframe
    :param mat2: second heatmap dataframe
    :param thresh: absolute value minimum count difference for a difference heatmap element to be visible
    :param ord: order of the norm, passed through to numpy.linalg.norm (None -> Frobenius)
    :return: difference matrix, norm and ratio of the sum of the first matrix over the second
    """
    sentinel = -999999
    ratio = mat1.sum().sum() / mat2.sum().sum()
    # Mark cells missing from mat1 with the sentinel so they survive the
    # subtraction, then subtract the second matrix scaled to mat1's total.
    delta = (mat1.fillna(sentinel)
             .subtract(mat2.fillna(0) * ratio, fill_value=0)
             .reindex_like(mat1)
             .astype(int))
    # Cells that stayed exactly at the sentinel were missing from both sides.
    delta = delta.replace(sentinel, np.nan)
    # Cells below the sentinel combined a missing mat1 cell with a mat2
    # contribution; shift them back into the normal value range.
    below = delta < sentinel
    delta[below] = delta[below] + abs(sentinel)
    # Hide differences within +/- thresh so they are invisible on the heatmap.
    delta[(delta >= 0) & (delta <= thresh)] = np.nan
    delta[(delta < 0) & (delta >= -thresh)] = np.nan
    norm = np.linalg.norm(delta.fillna(0), ord=ord)
    return delta, norm, ratio
def ngram_data(df, alpha_only=False, ascending=True, binary=False, break_on=None, caps=False,
               char_filter=None, column=None, compact=False, display=750, leaf_order=1, leaf_skip=0,
               persistence=None, random_state=None, remove_accents=False, reverse=False,
               rows_only=True, sort_by='len', stem_order=1, stem_skip=0, stop_words=None):
    """ ngram_data

    This is the main text ingestion function for stemgraphic.alpha. It is used by most of the visualizations. It
    can also be used directly, to feed a pipeline, for example.

    If selected (rows_only=False), the returned dataframe includes in each row a single word, the stem, the leaf and
    the ngram (stem + leaf) - the index is the 'token' position in the original source:

            word   stem leaf ngram
        12  salut  s    a    sa
        13  chéri  c    h    ch

    :param df: list, numpy array, series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: bool if the sort is ascending
    :param binary: bool if True forces counts to 1 for anything greater than 0
    :param break_on: letter on which to break a row, or None (default)
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param char_filter: list of characters to ignore. If None (default) CHAR_FILTER list will be used
    :param column: specify which column (string or number) of the dataframe to use, or group of columns (stems)
                   else the frame is assumed to only have one column with words.
    :param compact: remove empty stems
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param leaf_order: how many leaf characters per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param persistence: will save the sampled datafrae to filename (with csv or pkl extension) or None
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param remove_accents: bool if True strips accents (NA on dataframe)
    :param reverse: bool if True look at words from right to left
    :param rows_only: bool by default returns only the stem and leaf rows. If false, also the matrix and dataframe
    :param sort_by: default to 'len', can also be 'alpha'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :return: ordered rows if rows_only, else also returns the matrix and dataframe
    """
    if char_filter is None:
        char_filter = CHAR_FILTER
    if isinstance(df, str):
        # First check if it is a url
        if df[:7] in ['http://', 'https:/'] and len(df) < 2000:  # In theory 2048 is the max URL length
            data = urlopen(df)
            with data as r:
                lines = r.read().decode()  # utf8 for now
                linecontent = ''.join(lines)
        # Maybe filename passed, try to read a text file directly
        else:
            try:
                with open(df) as r:
                    lines = r.readlines()
                linecontent = ' '.join(lines)
            except IOError:
                # not url or filename, we'll assume a content string then
                linecontent = df
        if remove_accents:
            normalized = unicodedata.normalize('NFKD', linecontent)
            if normalized != linecontent:
                linecontent = ''.join([c for c in normalized if not unicodedata.combining(c)])
        # Turn every filtered character into a comma so it acts as a word separator.
        for ch in char_filter:
            if ch in linecontent:
                linecontent = linecontent.replace(ch, ',')
        if reverse:
            linecontent = linecontent[::-1]
        x = pd.DataFrame({
            'word': linecontent.replace(' ', ',').split(',')
        })
        x = x[x.word != '']
        if alpha_only:
            x = x[~x.word.str[:1].isin(NON_ALPHA)]
        if not caps:
            x.word = x.word.str.lower()
        if stop_words is not None:
            if not caps:
                stop_words = [word.lower() for word in stop_words]
            x = x[~x.word.isin(stop_words)]
        if column:
            x = x[x.word.str[:1].isin(column)]
        if display is None or display > x.word.shape[0]:
            x_s = x.reset_index()
        else:
            x_s = x.sample(n=display, random_state=random_state).reset_index()
    elif isinstance(df, list):
        x = pd.DataFrame({
            'word': df
        })
        if display is None or display > x.word.shape[0]:
            x_s = x
        else:
            x_s = x.sample(n=display, random_state=random_state).reset_index()
    else:
        # pandas or dask dataframe / series
        try:
            x = df if column is None else df[column]
            if reverse:
                x = x.str[::-1]
        except KeyError:
            x = df.copy()
            if reverse:
                # bug fix: applymap hands each *element* (a scalar string) to the callable,
                # so the previous `r.str[::-1]` raised AttributeError; plain slicing reverses it
                x = x.applymap(lambda word: word[::-1])
            if column:
                x = x[x.word.str[:1].isin(column)]
        if display is None or display > x.shape[0]:
            x_s = x
        else:
            x_s = x.sample(n=display, random_state=random_state).reset_index()
    if stem_order is None:
        stem_order = 1
    if leaf_order is None:
        leaf_order = 1
    x_s['stem'] = x_s.word.str[stem_skip:stem_skip + stem_order]
    offset = stem_skip + stem_order + leaf_skip
    # Pad short leaves so every leaf has the same width in the plots.
    x_s['leaf'] = x_s.word.str[offset:offset + leaf_order].str.ljust(leaf_order)
    x_s['ngram'] = x_s['stem'] + x_s['leaf']
    if persistence is not None:
        if persistence[-4:] == '.pkl':
            x_s.to_pickle(persistence)
        else:
            x_s.to_csv(persistence)  # TODO: add feather, hdf5 etc
    alpha_matrix = x_s.groupby(['stem', 'leaf']).count().unstack('leaf')
    if binary:
        # bug fix: the previous `alpha_matrix.astype(bool).astype(int)` discarded its
        # result, making binary a no-op. clip caps counts at 1 while leaving missing
        # (NaN) cells untouched, as documented for this option.
        alpha_matrix = alpha_matrix.clip(upper=1)
    if compact:
        pass  # nothing to do ATM.
    if break_on is not None:
        # TODO: investigate if we need this down to ngram_data level, or if stem_text/stem_graphic level is ok
        pass
    rows = alpha_matrix[alpha_matrix.columns[0][0]].apply(stack_columns, axis=1)
    # Sorting
    if sort_by == 'len':
        rows = rows[rows.str.len().sort_values().index]
    ordered_rows = rows if ascending else rows[::-1]
    if rows_only:
        return ordered_rows
    else:
        return ordered_rows, alpha_matrix, x_s
def polar_word_plot(ax, word, words, label, min_dist, max_dist, metric, offset, step):
    """Plot a fan of comparison words on a polar axis (helper for radar).

    Words are placed at equidistant angles starting at `offset` degrees, their
    radius given by `metric(word, comp)`. Words closer than `min_dist` are
    tracked for the running maximum but not drawn.

    :param ax: matplotlib ax
    :param word: string, the reference word that will be placed in the middle
    :param words: list of words to compare
    :param label: bool if True display words centered at coordinate
    :param min_dist: minimum distance based on metric to include a word for display
    :param max_dist: maximum distance for a given section
    :param metric: any metric function accepting two values and returning that metric in a range from 0 to x
    :param offset: where to start plotting in degrees
    :param step: how many degrees to step between plots
    :return: updated maximum distance
    """
    for position, candidate in enumerate(sorted(words), start=1):
        distance = metric(word, candidate)
        max_dist = max(max_dist, distance)
        if distance < min_dist:
            continue
        angle = radians(position * step + offset)
        # pull the line in slightly so it does not overlap the word itself
        ax.plot((0, angle), (0, distance - 0.01))
        if label:
            caption = ax.text(angle, distance, candidate, size=12, ha='center', va='center')
            caption.set_bbox(dict(facecolor='white', alpha=0.3))
    return max_dist
def plot_sunburst_level(normalized, ax, label=True, level=0, offset=0, ngram=False, plot=True, stem=None, vis=0):
    """Draw one ring of a sunburst chart as a stacked bar (helper for sunburst).

    :param normalized: series of normalized widths, indexed by stem/ngram text
    :param ax: matplotlib ax (polar) the ring is drawn on
    :param label: bool if True draw the text of each wedge
    :param level: ring level; 0 is the innermost, 0.4 is treated specially for labels
    :param offset: angular offset (in axis units) where this ring starts
    :param ngram: if True keep the full ngram text on outer rings instead of the leaf only
    :param plot: if False, only compute and return the wedge start positions
    :param stem: stem text to prefix onto each label, or None
    :param vis: minimum wedge width for its label to be drawn
    :return: the wedge start positions (cumulative widths shifted by offset)
    """
    count = len(normalized)
    widths = normalized.values
    # Left edge of each wedge: cumulative sum of the preceding widths, shifted by offset.
    lefts = np.cumsum([offset] + list(widths[:-1]))
    if not plot:
        return lefts
    bars = ax.bar(lefts, [level + 1] * count, widths, [level] * count,
                  linewidth=1, edgecolor='white', align='edge')
    captions = normalized.index.values
    fontsize = 16 if level in (0, 0.4) else 10
    if stem:
        # for stem, this is ok, unless level is 0.4 (next statement)
        captions = [stem + suffix for suffix in captions]
        fontsize = 10
    if level > 0.4 and not ngram:
        # strip stem, label should be leaf only unless ngram requested
        captions = [text[1:] for text in captions]
    # If label display is enabled, we got more work to do
    if label:
        for bar, caption in zip(bars, captions):
            width = bar.get_width()
            cx = bar.get_x() + width / 2
            cy = (bar.get_y() + bar.get_height()) / 2 if level == 0.4 else level * 2.9
            if width > vis:
                ax.text(cx, cy, caption, size=fontsize, color='k', ha='center', va='center')
    return lefts
def radar(word, comparisons, ascending=True, display=100, label=True, metric=None,
          min_distance=1, max_distance=None, random_state=None, sort_by='alpha'):
    """ radar

    The radar plot compares a reference word with a corpus. By default, it calculates the levenshtein
    distance between the reference word and each words in the corpus. An alternate distance or metric
    function can be provided. Each word is then plotted around the center based on 3 criteria.

    1) If the word length is longer, it is plotted on the left side, else on the right side.
    2) Distance from center is based on the distance function.
    3) the words are equidistant, and their order defined alphabetically or by count (only applicable
       if the corpus is a text and not a list of unique words, such as a password dictionary).

    Stem-and-leaf support is upcoming.

    :param word: string, the reference word that will be placed in the middle
    :param comparisons: external file, list or string or dataframe of words
    :param ascending: bool if the sort is ascending
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param label: bool if True display words centered at coordinate
    :param metric: Levenshtein (default), or any metric function accepting two values and returning that metric
    :param min_distance: minimum distance based on metric to include a word for display
    :param max_distance: maximum distance based on metric to include a word for display
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: default to 'alpha', can also be 'len'
    :return: matplotlib polar ax
    """
    if metric is None:
        if not Levenshtein:
            warn('metric not specified and Levenshtein module is not available. Specify an alternate metric.')
            return None
        metric = Levenshtein.distance
    # TODO: switch to ngram_data for stem-and-leaf support and better word support
    if isinstance(comparisons, str):
        # filename: load and tokenize, then sample down to `display` words
        with open(comparisons) as handle:
            content = ' '.join(handle.readlines())
        frame = pd.DataFrame({
            'word': content.replace('\n', ',').replace('"', ',').replace(".", ',').replace(' ', ',').split(',')
        })
        corpus = frame[frame.word != ''].word.sample(n=display, random_state=random_state).tolist()
    else:
        corpus = comparisons
    fig, pol_ax = plt.subplots(1, 1, figsize=(15, 15), subplot_kw=dict(projection='polar'))
    pol_ax.grid(color='#dfdfdf')             # Color the grid
    pol_ax.set_theta_zero_location('N')      # Origin is at the top
    pol_ax.set_theta_direction(-1)           # Reverse the rotation
    pol_ax.set_rlabel_position(0)            # default is angled to the right. move it out of the way
    pol_ax.axes.get_xaxis().set_visible(False)
    ref_len = len(word)
    sort_key = None if sort_by == 'alpha' else len
    # longer words go on the left half, shorter (or equal) on the right half
    longer = sorted((w for w in corpus if len(w) > ref_len), key=sort_key)
    shorter = sorted((w for w in corpus if len(w) <= ref_len), key=sort_key)
    if not ascending:
        longer = longer[::-1]
        shorter = shorter[::-1]
    # This was initially in radians, but that's not readable for most people, so it
    # is in degrees. Converted to radians directly at the call for plot and text.
    max_dist = polar_word_plot(pol_ax, word, longer, label, min_distance, 0,
                               metric, 180, 180 / (len(longer) + 1))
    max_dist = polar_word_plot(pol_ax, word, shorter, label, min_distance, max_dist,
                               metric, 0, 180 / (len(shorter) + 1))
    if max_distance is None:
        max_distance = max_dist
    pol_ax.set_ylim(0, max_distance)
    center = pol_ax.text(0, 0, word, ha='center', va='center', size=12)
    center.set_bbox(dict(facecolor='white', alpha=0.5))
    pol_ax.set_title('{} distance to {}'.format(metric, word))
    return pol_ax
def _scatter3d(df, x, y, z, s, color, ax, label=None, alpha=0.5):
""" _scatter3d
Helper to make call to scatter3d a little more like the 2d
:param df: data
:param x: x var name
:param y: y var name
:param z: z var name
:param s: size (list or scalar)
:param color: color, sequence, or sequence of color
:param ax: matplotlib ax
:param label: label for legend
:param alpha: alpha transparency
:return:
"""
xs = 0 if x == 0 else df[x] # logic for projections
ys = 0 if y in (0,100) else df[y]
zs = 0 if z == 0 else df[z]
ax.scatter(xs, ys, zs=zs, alpha=alpha, s=s, color=color, label=label)
def scatter(src1, src2, src3=None, alpha=0.5, alpha_only=True, ascending=True, asFigure=False, ax=None, caps=False,
            compact=True, display=None, fig_xy=None, interactive=True, jitter=False, label=False, leaf_order=1,
            leaf_skip=0, log_scale=True, normalize=None, percentage=None, project=False, project_only=False,
            random_state=None, sort_by='alpha', stem_order=1, stem_skip=0, stop_words=None, whole=False):
    """ scatter

    With 2 sources:

    Scatter compares the word frequency of two sources, on each axis. Each data point Z value is the word
    or stem-and-leaf value, while the X axis reflects that word/ngram count in one source and the Y axis
    reflect the same word/ngram count in the other source, in two different colors. If one word/ngram is more common
    on the first source it will be displayed in one color, and if it is more common in the second source, it
    will be displayed in a different color. The values that are the same for both sources will be displayed
    in a third color (default colors are blue, black and pink.

    With 3 sources:

    The scatter will compare in 3d the word frequency of three sources, on each axis. Each data point hover value is
    the word or stem-and-leaf value, while the X axis reflects that word/ngram count in the 1st source, the Y axis
    reflects the same word/ngram count in the 2nd source, and the Z axis the 3rd source, each in a different color.
    If one word/ngram is more common on the 1st source it will be displayed in one color, in the 2nd source as a
    second color and if it is more common in the 3rd source, it will be displayed in a third color.
    The values that are the same for both sources will be displayed in a 4th color (default colors are
    blue, black, purple and pink.

    In interactive mode, hovering the data point
    will give the precise counts on each axis along with the word itself, and filtering by category is done
    by clicking on the category in the legend. Double clicking a category will show only that category.

    :param src1: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src2: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src3: string, filename, url, list, numpy array, time series, pandas or dask dataframe, optional
    :param alpha: opacity of the dots, defaults to 50%
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: word/stem count sorted in ascending order, defaults to True
    :param asFigure: return plot as plotly figure (for web applications)
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param fig_xy: tuple for matplotlib figsize, defaults to (20,20)
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param jitter: random noise added to help see multiple data points sharing the same coordinate
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param log_scale: bool if True (default) uses log scale axes (NA in 3d due to open issues with mpl, cufflinks)
    :param normalize: bool if True normalize frequencies in src2 and src3 relative to src1 length
    :param percentage: coordinates in percentage of maximum word/ngram count (in non interactive mode)
    :param project: project src1/src2 and src1/src3 comparisons on X=0 and Z=0 planes
    :param project_only: only show the projection (NA if project is False)
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param whole: for normalized or percentage, use whole integer values (round)
    :return: matplotlib ax, dataframe with categories
    """
    # Ingest each provided source through ngram_data; collect the per-source
    # ngram matrix, the sampled word dataframe and a display name.
    alpha_matrix = []
    x = []
    filename = []
    for src in [src1, src2, src3]:
        if isinstance(src, str):
            filename1 = src[:96]
        else:
            filename1 = 'data'
        if src:
            _, alpha_matrix1, x1 = ngram_data(
                src,
                alpha_only=alpha_only,
                compact=compact,
                display=display,
                leaf_order=leaf_order,
                leaf_skip=leaf_skip,
                rows_only=False,
                random_state=random_state,
                sort_by=sort_by,
                stem_order=stem_order,
                stem_skip=stem_skip,
                stop_words=stop_words,
                caps=caps)
            alpha_matrix.append(alpha_matrix1)
            x.append(x1)
            filename.append(filename1)
    # Count whole words when no stem/leaf split is requested, else count ngrams.
    if stem_order is None and leaf_order is None:
        count_by = 'word'
    else:
        count_by = 'ngram'
    # Size ratios used to normalize the 2nd (and 3rd) source counts to the 1st source.
    xy_ratio = len(x[0]) / len(x[1])
    if src3:
        xz_ratio = len(x[0]) / len(x[2])
        # One row per ngram, columns x/y/z holding the counts in each source.
        red = pd.concat([x[0][count_by].value_counts().rename('x'),
                         x[1][count_by].value_counts().rename('y'),
                         x[2][count_by].value_counts().rename('z')], axis=1)
        # NOTE(review): this fillna is not in-place and its result is discarded,
        # so the later dropna removes ngrams missing from any source — confirm intent.
        red.fillna(0)
        if normalize:
            red.y = red.y * xy_ratio
            red.z = red.z * xz_ratio
        max_count = red[['x', 'y', 'z']].abs().max().max()
    else:
        red = pd.concat([x[0][count_by].value_counts().rename('x'), x[1][count_by].value_counts().rename('y')], axis=1)
        if normalize:
            red.y = red.y * xy_ratio
        max_count = red[['x', 'y']].abs().max().max()
    title = '{} vs{}{}{}'.format(filename[0],
                                 '<br>' if interactive else '\n',
                                 'normalized ' if normalize else '', filename[1])
    if src3:
        title = '{} vs {}'.format(title, filename[2])
    # NOTE(review): these two fillna calls are also not in-place and have no effect.
    red.x.fillna(0)
    red.y.fillna(0)
    red.dropna(inplace=True)
    if percentage:
        # Rescale counts to a percentage of the largest count across the sources.
        red.x = red.x / max_count * 100
        red.y = red.y / max_count * 100
        if src3:
            red.z = red.z / max_count * 100
        if whole:
            red.x = red.x.round()
            red.y = red.y.round()
            if src3:
                red.z = red.z.round()
    # Categorize each ngram by which source uses it the most ('=' when all tie).
    red['diff1'] = red.x - red.y
    if src3:
        red['diff2'] = red.x - red.z
    red['categories'] = 'x'
    red.loc[(red['diff1'] < 0), 'categories'] = 'y'
    if src3:
        red.loc[(red['diff2'] < 0), 'categories'] = 'z'
        red.loc[(red['diff2'] == 0) & (red['diff1'] == 0), 'categories'] = '='
        red['hovertext'] = red.index.values + ' ' \
            + red.x.astype(str) + ' ' + red.y.astype(str) + ' ' + red.z.astype(str)
    else:
        red.loc[(red['diff1'] == 0), 'categories'] = '='
        red['hovertext'] = red.x.astype(str) + ' ' + red.index.values + ' ' + red.y.astype(str)
    red['text'] = red.index.values
    red.sort_values(by='categories', inplace=True)
    if jitter:
        # varies slightly the values from their integer counts, but the hover will show the correct count pre jitter
        red['x'] = red['x'] + np.random.uniform(-0.25, 0.25, len(red))
        red['y'] = red['y'] + np.random.uniform(-0.25, 0.25, len(red))
        if src3:
            red['z'] = red['z'] + np.random.uniform(-0.25, 0.25, len(red))
    # One color per category; trim the palette when fewer categories are present.
    palette = ['pink', 'blue', 'gray', 'lightpurple']
    if len(red.categories.dropna().unique()) < 4:
        palette = palette[1:len(red.categories.dropna().unique())]
    if fig_xy == None:
        fig_xy = (10, 10)
    if interactive:
        # cufflinks patches .iplot onto dataframes; AttributeError means it is not loaded.
        try:
            if src3:
                ax1 = red.iplot(kind='scatter3d', colors=palette,
                                x='x', y='y', z='z', categories='categories', title=title, opacity=alpha,
                                # can't use this until fixed: https://github.com/santosjorge/cufflinks/issues/87
                                # logx=log_scale, logy=log_scale, logz=log_scale,
                                size=red.index.str.len(), text='text' if label else 'hovertext', hoverinfo='text',
                                mode='markers+text' if label else 'markers', asFigure=asFigure)
            else:
                ax1 = red.iplot(kind='scatter', colors=palette, logx=log_scale, logy=log_scale, opacity=alpha,
                                x='x', y='y', categories='categories', title=title,
                                size=red.index.str.len(), text='text' if label else 'hovertext', hoverinfo='text',
                                mode='markers+text' if label else 'markers', asFigure=asFigure)
        except AttributeError:
            warn('Interactive plot requested, but cufflinks not loaded. Falling back to matplotlib.')
            interactive = False
            # in case %matplotlib notebook
            fig_xy = (10, 10)
    if not interactive:
        if ax is None:
            if src3:
                fig = plt.figure(figsize=fig_xy)
                ax = fig.add_subplot(111, projection='3d')
                if not project_only:
                    _scatter3d(red[red.categories == 'x'], x='x', y='y', z='z', alpha=alpha,
                               s=red[red.categories == 'x'].index.str.len()*10, ax=ax, color='C0', label='x')
                    _scatter3d(red[red.categories == 'y'], x='x', y='y', z='z', alpha=alpha,
                               s=red[red.categories == 'y'].index.str.len()*10, ax=ax, color='k', label='y')
                    _scatter3d(red[red.categories == 'z'], x='x', y='y', z='z', alpha=alpha,
                               s=red[red.categories == 'z'].index.str.len()*10, ax=ax, color='C4', label='z')
                    if len(palette) == 4:
                        # we do have equal values
                        _scatter3d(red[red.categories == '='], x='x', y='y', z='z', alpha=alpha,
                                   s=red[red.categories == '='].index.str.len()*10, ax=ax, color='C3', label='=')
                if project:
                    # Flatten the comparisons onto the z=0 and x=0 planes.
                    _scatter3d(red[red.categories == 'x'], x='x', y='y', z=0, alpha=alpha,
                               s=red[red.categories == 'x'].index.str.len() * 10, ax=ax, color='C0')
                    _scatter3d(red[red.categories == 'y'], x='x', y='y', z=0, alpha=alpha,
                               s=red[red.categories == 'y'].index.str.len() * 10, ax=ax, color='k')
                    _scatter3d(red[red.categories == 'y'], x=0, y='y', z='z', alpha=alpha,
                               s=red[red.categories == 'y'].index.str.len() * 10, ax=ax, color='k')
                    _scatter3d(red[red.categories == 'z'], x=0, y='y', z='z', alpha=alpha,
                               s=red[red.categories == 'z'].index.str.len() * 10, ax=ax, color='C4')
            else:
                fig, ax = plt.subplots(1, 1, figsize=fig_xy)
                if label:
                    # fade the dots so the text labels drawn below remain readable
                    alpha = 0.05
                red[red.categories == 'x'].plot(kind='scatter', x='x', y='y', color='C0', ax=ax, label='x',
                                                alpha=alpha, s=red[red.categories == 'x'].index.str.len() * 10)
                red[red.categories == 'y'].plot(ax=ax, kind='scatter', x='x', y='y', color='k', label='y',
                                                alpha=alpha, s=red[red.categories == 'y'].index.str.len() * 10)
                if len(palette) == 3:
                    red[red.categories == '='].plot(ax=ax, kind='scatter', x='x', y='y', color='C3', label='=',
                                                    alpha=alpha, s=red[red.categories == '='].index.str.len() * 10)
        if log_scale:
            if src3:
                warn("Log_scale is not working currently due to an issue in {}.".format(
                    'cufflinks' if interactive else 'matplotlib'))
                # matplotlib bug: https://github.com/matplotlib/matplotlib/issues/209
                # cufflinks bug: https://github.com/santosjorge/cufflinks/issues/87
            else:
                ax.set_xscale('log')
                ax.set_yscale('log')
        if label:
            if log_scale:
                warn("Labels do not currently work in log scale due to an incompatibility in matplotlib."
                     " Set log_scale=False to display text labels.")
            elif src3:
                for tx, ty, tz, tword in red[['x', 'y', 'z', 'text']].dropna().values:
                    ax.text(tx, ty, tword, zs=tz, va='center', ha='center')
            else:
                for tx, ty, tword in red[['x', 'y', 'text']].dropna().values:
                    if tx < 5 and ty < 5:
                        if np.random.random() > 0.90:
                            # very dense area usually, show roughly 10%, randomly
                            ax.text(tx, ty, tword, va='center', ha='center')
                    else:
                        ax.text(tx, ty, tword, va='center', ha='center')
        ax.set_title(title)
        ax.legend(loc='best')
        if not ascending:
            ax.invert_xaxis()
    # NOTE(review): in interactive mode the plotly figure lives in ax1 (unused here)
    # and the returned ax is whatever was passed in, often None — confirm callers
    # rely on asFigure for the interactive case.
    return ax, red.drop(['hovertext'], axis=1)
def stem_scatter(src1, src2, src3=None, alpha=0.5, alpha_only=True, ascending=True, asFigure=False, ax=None, caps=False,
                 compact=True, display=None, fig_xy=None, interactive=True, jitter=False, label=False, leaf_order=1,
                 leaf_skip=0, log_scale=True, normalize=None, percentage=None, project=False, project_only=False,
                 random_state=None, sort_by='alpha', stem_order=1, stem_skip=0, stop_words=None, whole=False):
    """ stem_scatter

    Compare the word/ngram frequency of two (or three) text sources as a scatter plot.
    This is a thin convenience wrapper around :func:`scatter`: every argument is
    forwarded unchanged, and the return value is scatter's return value.

    :param src1: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src2: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src3: string, filename, url, list, numpy array, time series, pandas or dask dataframe, optional
    :param alpha: opacity of the dots, defaults to 50%
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to True
    :param asFigure: return plot as plotly figure (for web applications)
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param fig_xy: tuple for matplotlib figsize, defaults to (20,20)
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param jitter: random noise added to help see multiple data points sharing the same coordinate
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param log_scale: bool if True (default) uses log scale axes (NA in 3d due to open issues with mpl, cufflinks)
    :param normalize: bool if True normalize frequencies in src2 and src3 relative to src1 length
    :param percentage: coordinates in percentage of maximum word/ngram count
    :param project: project src1/src2 and src1/src3 comparisons on X=0 and Z=0 planes
    :param project_only: only show the projection (NA if project is False)
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param whole: for normalized or percentage, use whole integer values (round)
    :return: matplotlib polar ax, dataframe
    """
    return scatter(src1, src2, src3=src3, alpha=alpha, alpha_only=alpha_only, ascending=ascending,
                   asFigure=asFigure, ax=ax, caps=caps, compact=compact, display=display, fig_xy=fig_xy,
                   interactive=interactive, jitter=jitter, label=label, leaf_order=leaf_order,
                   leaf_skip=leaf_skip, log_scale=log_scale, normalize=normalize, percentage=percentage,
                   project=project, project_only=project_only, random_state=random_state, sort_by=sort_by,
                   stem_order=stem_order, stem_skip=stem_skip, stop_words=stop_words, whole=whole)
def stem_text(df, aggr=False, alpha_only=True, ascending=True, binary=False, break_on=None, caps=True,
              column=None, compact=False, display=750,
              legend_pos='top', leaf_order=1, leaf_skip=0, persistence=None, remove_accents=False,
              reverse=False, rows_only=False, sort_by='len', stem_order=1, stem_skip=0,
              stop_words=None, random_state=None):
    """ stem_text

    Tukey's original stem-and-leaf plot was text, with a vertical delimiter to separate stem from
    leaves. Just as stemgraphic implements a text version of the plot for numbers,
    stemgraphic.alpha implements a text version for words. This type of plot serves a similar
    purpose as a stacked bar chart with each data point annotated.

    It also displays some basic statistics on the whole text (or subset if using column).

    :param df: list, numpy array, time series, pandas or dask dataframe
    :param aggr: bool if True display the aggregated count of leaves by row
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: bool if the sort is ascending
    :param binary: bool if True forces counts to 1 for anything greater than 0
    :param break_on: force a break of the leaves at that letter, the rest of the leaves will appear on the next line
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param column: specify which column (string or number) of the dataframe to use, or group of columns (stems)
                   else the frame is assumed to only have one column with words.
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param leaf_order: how many leaf characters per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param legend_pos: where to put the legend: 'top' (default), 'bottom' or None
    :param persistence: will save the sampled datafrae to filename (with csv or pkl extension) or None
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param remove_accents: bool if True strips accents (NA on dataframe)
    :param reverse: bool if True look at words from right to left
    :param rows_only: by default returns only the stem and leaf rows. If false, also return the matrix and dataframe
    :param sort_by: default to 'len', can also be 'alpha'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :return: ordered rows if rows_only, else also returns the matrix and dataframe
    """
    # the rows will come back sorted from this call.
    # bug fix: always request the full (rows, matrix, dataframe) tuple here -
    # forwarding rows_only=True to ngram_data made it return only the rows,
    # which broke the 3-way unpack below with a ValueError. rows_only is
    # honored at return time instead.
    rows, alpha_matrix, x = ngram_data(df, alpha_only=alpha_only, ascending=ascending, binary=binary,
                                       break_on=break_on, caps=caps, column=column, compact=compact,
                                       display=display, leaf_order=leaf_order, leaf_skip=leaf_skip,
                                       persistence=persistence, random_state=random_state,
                                       remove_accents=remove_accents, reverse=reverse, rows_only=False,
                                       sort_by=sort_by, stem_order=stem_order, stem_skip=stem_skip,
                                       stop_words=stop_words)
    if legend_pos == 'top':
        print('{}: \n{}\nsampled {:>4}\n'.format(column if column else '', x.word.describe(include='all'), display))
    cnt = 0
    # Regex matching the break letter or anything after it - only needed when break_on is set.
    find = re.compile("([{}-z?])".format(break_on)) if break_on is not None else None
    for val, leaves in rows.items():
        mask = '{:<' + str(stem_order) + '}| {}'
        if aggr:
            cnt += int(len(leaves) / leaf_order)
            mask = '{:<' + str(len(str(display))) + '}|{:<' + str(stem_order) + '}| {}'
        if break_on is not None:
            try:
                pos = re.search(find, leaves).start()
            except AttributeError:
                # no leaf at or beyond the break letter on this row
                pos = 0
            if pos > 0:
                low = leaves[:pos]
                high = leaves[pos:]
            else:
                low = leaves
                high = ''
            if ascending:
                argsl = (cnt, val, low) if aggr else (val, low)
                argsh = (cnt, val, high) if aggr else (val, high)
            else:
                argsl = (cnt, val, high) if aggr else (val, high)
                argsh = (cnt, val, low) if aggr else (val, low)
        else:
            argsl = (cnt, val, leaves) if aggr else (val, leaves)
        print(mask.format(*argsl))
        if break_on:
            # noinspection PyUnboundLocalVariable
            print(mask.format(*argsh))
    if legend_pos is not None and legend_pos != 'top':
        print('Alpha stem and leaf {}: \n{}\nsampled {:>4}\n'.format(
            column if column else '', x.word.describe(include='all'), display))
    if rows_only:
        return rows
    else:
        return rows, alpha_matrix, x
# noinspection PyTypeChecker
def stem_graphic(df, df2=None, aggregation=True, alpha=0.1, alpha_only=True, ascending=False, ax=None, ax2=None,
                 bar_color='C0', bar_outline=None, break_on=None, caps=True, column=None, combined=None, compact=False,
                 delimiter_color='C3', display=750, figure_only=True, flip_axes=False,
                 font_kw=None, leaf_color='k', leaf_order=1, leaf_skip=0, legend_pos='best',
                 median_color='C4', mirror=False, persistence=None, primary_kw=None,
                 random_state=None, remove_accents=False, reverse=False, secondary=False,
                 show_stem=True, sort_by='len', stop_words=None, stem_order=1, stem_skip=0,
                 title=None, trim_blank=False, underline_color=None):
    """ stem_graphic
    The principal visualization of stemgraphic.alpha is stem_graphic. It offers all the
    options of stem\_text (3.1) and adds automatic title, mirroring, flipping of axes,
    export (to pdf, svg, png, through fig.savefig) and many more options to change the
    visual appearance of the plot (font size, color, background color, underlining and more).
    By providing a secondary text source, the plot will enable comparison through a back-to-back display
    :param df: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param df2: string, filename, url, list, numpy array, time series, pandas or dask dataframe (optional).
                for back 2 back stem-and-leaf plots
    :param aggregation: Boolean for sum, else specify function
    :param alpha: opacity of the bars, median and outliers, defaults to 10%
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to False
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param ax2: matplotlib axes instance, usually from a figure or other plot for back to back
    :param bar_color: the fill color of the bar representing the leaves
    :param bar_outline: the outline color of the bar representing the leaves
    :param break_on: force a break of the leaves at that letter, the rest of the leaves will appear on the next line
    :param caps: bool, True to be case sensitive, defaults to True, recommended for comparisons.(NA on dataframe)
    :param column: specify which column (string or number) of the dataframe to use, or group of columns (stems)
                   else the frame is assumed to only have one column with words.
    :param combined: list (specific subset to automatically include, say, for comparisons), or None
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param delimiter_color: color of the line between aggregate and stem and stem and leaf
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param figure_only: bool if True (default) returns matplotlib (fig,ax), False returns (fig,ax,df)
    :param flip_axes: X becomes Y and Y becomes X
    :param font_kw: keyword dictionary, font parameters
    :param leaf_color: font color of the leaves
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param legend_pos: One of 'top', 'bottom', 'best' or None, defaults to 'best'.
    :param median_color: color of the box representing the median
    :param mirror: mirror the plot in the axis of the delimiters
    :param persistence: filename. save sampled data to disk, either as pickle (.pkl) or csv (any other extension)
    :param primary_kw: stem-and-leaf plot additional arguments
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param remove_accents: bool if True strips accents (NA on dataframe)
    :param reverse: bool if True look at words from right to left
    :param secondary: bool if True, this is a secondary plot - mostly used for back-to-back plots
    :param show_stem: bool if True (default) displays the stems
    :param sort_by: default to 'len', can also be 'alpha'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param title: string, or None. When None and source is a file, filename will be used.
    :param trim_blank: remove the blank between the delimiter and the first leaf, defaults to False
    :param underline_color: color of the horizontal line under the leaves, None for no display
    :return: matplotlib figure and axes instance, and dataframe if figure_only is False
    """
    # When the source is a filename/url string and no title was given, use the string itself (truncated).
    if isinstance(df, str) and title is None:
        title = df[:96]  # max 96 chars for title
    elif title is None:
        # still
        title = ''
    if font_kw is None:
        font_kw = {}
    if primary_kw is None:
        primary_kw = {}
    # Typography: pull optional font settings out of font_kw with defaults for each element.
    base_fontsize = font_kw.get('fontsize', 12)
    aggr_fontsize = font_kw.get('aggr_fontsize', base_fontsize - 2)
    aggr_fontweight = font_kw.get('aggr_fontweight', 'normal')
    aggr_facecolor = font_kw.get('aggr_facecolor', None)
    aggr_fontcolor = font_kw.get('aggr_color', 'k')
    stem_fontsize = font_kw.get('stem_fontsize', base_fontsize)
    stem_fontweight = font_kw.get('stem_fontweight', 'normal')
    stem_facecolor = font_kw.get('stem_facecolor', None)
    stem_fontcolor = font_kw.get('stem_color', 'k')
    pad = primary_kw.get('pad', 1.5)
    # leaf_color=None means "hide the leaf text": render in black but fully transparent.
    leaf_alpha = 1
    if leaf_color is None:
        leaf_color = 'k'
        leaf_alpha = 0
    # Tokenize/sample the source: rows maps each stem to its concatenated leaves.
    rows, alpha_matrix, x = ngram_data(
        df,
        alpha_only=alpha_only,
        break_on=break_on,
        caps=caps,
        compact=compact,
        column=column,
        display=display,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        persistence=persistence,
        random_state=random_state,
        remove_accents=remove_accents,
        reverse=reverse,
        rows_only=False,
        sort_by=sort_by,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words
    )
    # Longest leaf string drives the figure size along the leaf axis.
    if combined is not None:
        max_leaves = rows[combined].str.len().max()
    else:
        max_leaves = rows.str.len().max()
    if df2 is not None:
        if flip_axes:
            # NOTE(review): returns None here, unlike the (fig, ax) documented return - callers must handle.
            warn("Error: flip_axes is not available with back to back stem-and-leaf plots.")
            return None
        # Pre-sample the secondary source; result is discarded here (the recursive
        # stem_graphic call below re-processes df2 for the mirrored panel).
        _ = ngram_data(
            df2,
            alpha_only=alpha_only,
            break_on=break_on,
            caps=caps,
            column=column,
            display=display,
            leaf_order=leaf_order,
            leaf_skip=leaf_skip,
            random_state=random_state,
            reverse=reverse,
            rows_only=False,
            sort_by=sort_by,
            stem_order=stem_order,
            stem_skip=stem_skip,
            stop_words=stop_words
        )
    fig = None
    # Compute figure dimensions (in row/leaf units) depending on orientation.
    if flip_axes:
        height = max_leaves + 3
        if height < 20:
            height = 20
        width = len(rows) + 3
    else:
        # NOTE(review): max_leaves / (max_leaves/40) algebraically reduces to 40 for any
        # max_leaves > 0, making the following < 20 clamp unreachable - confirm intent.
        width = max_leaves / (max_leaves/40)
        if width < 20:
            width = 20
        # if df2:
        #    width /= 2  # two charts, need to maximize ratio
        height = len(rows) + 3
    # With no explicit subset, render every stem returned by ngram_data.
    if combined is None:
        combined = rows.index
    else:
        height = len(combined)
        width = max_leaves / 8 + 3
    aggr_offset = -0.5
    aggr_line_offset = 1
    if df2 is not None:
        # Back-to-back mode: set up the left (secondary) axes and recurse for the second panel.
        # values = res.index
        # combined = sorted(list(set(values.append(res2.index))))
        aggr_offset = -3.7
        aggr_line_offset = 0.2
        if ax2 is None:
            fig, (ax1, ax) = plt.subplots(1, 2, sharey=True, figsize=((width / 4), (height / 4)))
        else:
            ax1 = ax2
            ax1.set_xlim((-1, width + 0.05))
            ax1.set_ylim((-1, height + 0.05))
        plt.box(on=None)
        ax1.axes.get_yaxis().set_visible(False)
        ax1.axes.get_xaxis().set_visible(False)
        if not ax2:
            ax1.set_xlim(-1, width + 0.05)
            ax1.set_ylim(-1, height + 0.05)
        # Recursive call draws the mirrored secondary plot on ax1.
        _ = stem_graphic(df2,  # NOQA
                         ax=ax1, aggregation=mirror and aggregation, alpha_only=alpha_only, ascending=ascending,
                         break_on=break_on, column=column, combined=combined, display=display, flip_axes=False,
                         mirror=not mirror, reverse=reverse, secondary=True, random_state=random_state,
                         show_stem=True, stop_words=stop_words)
    # Primary axes: create a fresh figure, or draw onto the caller-provided ax.
    if ax is None:
        fig = plt.figure(figsize=((width / 4), (height / 4)))
        ax = fig.add_axes((0.05, 0.05, 0.9, 0.9),
                          aspect='equal', frameon=False,
                          xlim=(-1, width + 0.05),
                          ylim=(-1, height + 0.05))
    else:
        ax.set_xlim((-1, width + 0.05))
        ax.set_ylim((-1, height + 0.05))
        fig = ax.get_figure()
    plt.box(on=None)
    ax.axis('off')
    ax.axes.get_yaxis().set_visible(False)
    ax.axes.get_xaxis().set_visible(False)
    # Title placement depends on mirroring and whether this panel is part of a pair.
    if df2 is not None or secondary:
        title_offset = -2 if mirror else 4
    else:
        title_offset = 0 if mirror else 2
    if flip_axes:
        ax.set_title(title, y=title_offset)
    else:
        ax.set_title(title, x=title_offset)
    offset = 0
    # Mirroring / descending order are achieved by inverting the relevant axis limits.
    if mirror:
        ax.set_ylim(ax.get_ylim()[::-1]) if flip_axes else ax.set_xlim(ax.get_xlim()[::-1])
        offset = -2 if secondary else 0.5
    if not ascending:
        ax.set_xlim(ax.get_xlim()[::-1]) if flip_axes else ax.set_ylim(ax.get_ylim()[::-1])
    tot = 0
    min_s = 99999999
    # Right-align the running aggregate count to the width of the largest possible value.
    mask = '{:>' + str(len(str(display))) + '}'
    # Main render loop: one row (aggregate count | stem | leaves) per stem in `combined`.
    for cnt, item in enumerate(combined):
        stem = item
        try:
            leaf = rows[item]
        except KeyError:
            # Stem requested via `combined` but absent from this source: render an empty leaf.
            leaf = ' '
        tot += int(len(leaf) / leaf_order)
        if trim_blank:
            leaf = leaf.strip()
        tot_display = mask.format(tot)
        if flip_axes:
            # Vertical layout: text rotated 90 degrees, rows advance along x.
            if aggregation and not (df2 and mirror):
                ax.text(cnt + offset, 0, tot_display, fontsize=aggr_fontsize, rotation=90, color=aggr_fontcolor,
                        bbox={'facecolor': aggr_facecolor, 'alpha': alpha, 'pad': pad} if aggr_facecolor is not None
                        else {'alpha': 0},
                        fontweight=aggr_fontweight, va='center', ha='right' if mirror else 'left')
            # STEM
            if show_stem:
                ax.text(cnt + offset, 1.5, stem, fontweight=stem_fontweight, color=stem_fontcolor, family='monospace',
                        bbox={'facecolor': stem_facecolor, 'alpha': alpha, 'pad': pad} if stem_facecolor is not None
                        else {'alpha': 0},
                        fontsize=stem_fontsize, va='center', ha='right' if mirror else 'left')
            # LEAF
            ax.text(cnt, 2.1, leaf[::-1] if mirror else leaf, fontsize=base_fontsize, color=leaf_color,
                    ha='left', va='top' if mirror else 'bottom', rotation=90, alpha=leaf_alpha, family='monospace',
                    bbox={'facecolor': bar_color, 'edgecolor': bar_outline, 'alpha': alpha, 'pad': pad})
        else:
            # Horizontal layout: rows advance along y.
            if aggregation and not (df2 is not None and mirror):
                ax.text(aggr_offset, cnt + 0.5, tot_display, fontsize=aggr_fontsize, color=aggr_fontcolor,
                        bbox={'facecolor': aggr_facecolor, 'alpha': alpha, 'pad': pad} if aggr_facecolor is not None
                        else {'alpha': 0},
                        fontweight=aggr_fontweight, va='center', ha='right')  # if mirror else 'left')
            # STEM
            if show_stem:
                stem_offset = 2.2
                if secondary and not mirror:
                    stem_offset = -8
                elif df2 is not None and mirror:
                    stem_offset = 2.1
                ax.text(stem_offset, cnt + 0.5, stem, fontweight=stem_fontweight, color=stem_fontcolor,
                        family='monospace',
                        bbox={'facecolor': stem_facecolor, 'alpha': alpha, 'pad': pad} if stem_facecolor is not None
                        else {'alpha': 0},
                        fontsize=stem_fontsize, va='center', ha='left' if mirror else 'right')
            # LEAF
            ax.text(2.6, cnt + 0.5, leaf[::-1] if mirror else leaf, fontsize=base_fontsize, family='monospace',
                    va='center', ha='right' if mirror else 'left', color=leaf_color, alpha=leaf_alpha,
                    bbox={'facecolor': bar_color, 'edgecolor': bar_outline, 'alpha': alpha, 'pad': pad})
            if underline_color:
                ax.hlines(cnt, 2.6, 2.6 + len(leaf) / 2, color=underline_color)
    # Delimiter lines separating the aggregate column, stems and leaves.
    if flip_axes:
        # noinspection PyUnboundLocalVariable
        ax.hlines(2, min_s, min_s + 1 + cnt, color=delimiter_color, alpha=0.7)
        if aggregation:
            ax.hlines(1, min_s, min_s + 1 + cnt, color=delimiter_color, alpha=0.7)
    else:
        if aggregation and not (df2 is not None and mirror):
            # noinspection PyUnboundLocalVariable
            ax.vlines(aggr_line_offset, 0, 1 + cnt, color=delimiter_color, alpha=0.7)
        if show_stem:
            ax.vlines(2.4, 0, 1 + cnt, color=delimiter_color, alpha=0.7)
    # Invisible anchor point keeps the full extent of the plot inside the view.
    if flip_axes:
        ax.plot(0, height)
    else:
        ax.plot(width, 0)
    fig.tight_layout()
    if figure_only:
        return fig, ax
    else:
        return fig, ax, x
# noinspection PyPep8Naming
def stem_freq_plot(df, alpha_only=False, asFigure=False, column=None, compact=True, caps=False,  # NOQA
                   display=2600, interactive=True, kind='barh', leaf_order=1, leaf_skip=0, random_state=None,
                   stem_order=1, stem_skip=0, stop_words=None):
    """ stem_freq_plot
    Word frequency plot is the most common visualization in NLP. In this version it supports stem-and-leaf / n-grams.
    Each row is the stem, and similar leaves are grouped together and each different group is stacked
    in bar charts.
    Default is horizontal bar chart, but vertical, histograms, area charts and even pie charts are
    supported by this one visualization.
    :param df: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param asFigure: return plot as plotly figure (for web applications)
    :param column: specify which column (string or number) of the dataframe to use, or group of columns (stems)
                   else the frame is assumed to only have one column with words.
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param kind: defaults to 'barh'. One of 'bar','barh','area','hist'. Non-interactive also supports 'pie'
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :return: the sampled dataframe (x) used to build the plot
    """
    # Build the stem/leaf frequency matrix from the (possibly sampled) source.
    rows, alpha_matrix, x = ngram_data(
        df,
        alpha_only=alpha_only,
        caps=caps,
        compact=compact,
        display=display,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        rows_only=False,
        random_state=random_state,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words,
    )
    if not interactive:
        # Static matplotlib rendering gets a large dedicated figure.
        plt.figure(figsize=(20, 20))
    if isinstance(df, str):
        title = 'stem-and-leaf stacked frequency for {}'.format(df)
    else:
        title = 'stem-and-leaf stacked frequency'
    if interactive:
        try:
            if column:
                # one or multiple "columns" specified, we filter those stems
                alpha_matrix.loc[column].word.iplot(kind=kind, barmode='stack', asFigure=asFigure, title=title)
            else:
                alpha_matrix.word.iplot(kind=kind, barmode='stack', asFigure=asFigure, title=title)
        except AttributeError:
            # .iplot only exists when cufflinks has monkey-patched pandas; degrade gracefully.
            warn('Interactive plot requested, but cufflinks not loaded. Falling back to matplotlib.')
            alpha_matrix.word.plot(kind=kind, stacked=True, legend=None, title=title)
    else:
        alpha_matrix.word.plot(kind=kind, stacked=True, legend=None, title=title)
    return x
def stem_sunburst(words, alpha_only=True, ascending=False, caps=False, compact=True, display=None, hole=True,
                  label=True, leaf_order=1, leaf_skip=0, median=True, ngram=False, random_state=None, sort_by='alpha',
                  statistics=True, stem_order=1, stem_skip=0, stop_words=None, top=0):
    """ stem_sunburst
    Stem-and-leaf based sunburst. Thin wrapper that forwards every option to ``sunburst``
    (see that function for the full description of the chart).
    :param words: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to False
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to True
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param hole: bool if True (default) leave space in middle for statistics
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param median: bool if True (default) display an origin and a median mark
    :param ngram: bool if True display full n-gram as leaf label
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param statistics: bool if True (default) displays statistics in center - hole has to be True
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param top: how many different words to count by order frequency. If negative, this will be the least frequent
    :return:
    """
    # Collect every option once, then delegate; keeps the forwarding call readable.
    sunburst_options = dict(
        alpha_only=alpha_only,
        ascending=ascending,
        caps=caps,
        compact=compact,
        display=display,
        hole=hole,
        label=label,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        median=median,
        ngram=ngram,
        random_state=random_state,
        sort_by=sort_by,
        statistics=statistics,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words,
        top=top,
    )
    return sunburst(words, **sunburst_options)
# noinspection PyTypeChecker,PyTypeChecker,PyTypeChecker,PyTypeChecker,PyTypeChecker,PyTypeChecker
def sunburst(words, alpha_only=True, ascending=False, caps=False, compact=True, display=None, hole=True,
             label=True, leaf_order=1, leaf_skip=0, median=True, ngram=True, random_state=None, sort_by='alpha',
             statistics=True, stem_order=1, stem_skip=0, stop_words=None, top=40):
    """ sunburst
    Word sunburst charts are similar to pie or donut charts, but add some statistics
    in the middle of the chart, including the percentage of total words targeted for a given
    number of unique words (ie. top 50 words, 48\% coverage).
    With stem-and-leaf, the first level of the sunburst represents the stem and the second
    level subdivides each stem by leaves.
    :param words: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to False
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to True
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param hole: bool if True (default) leave space in middle for statistics
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param median: bool if True (default) display an origin and a median mark
    :param ngram: bool if True (default) display full n-gram as leaf label
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param statistics: bool if True (default) displays statistics in center - hole has to be True
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param top: how many different words to count by order frequency. If negative, this will be the least frequent
    :return: matplotlib polar ax, dataframe
    """
    # A string source is assumed to be a filename/url and is reused as the chart subtitle.
    if isinstance(words, str):
        filename = words
    else:
        filename = 'data'
    # Tokenize/sample the source; alpha_matrix holds per-stem leaf counts, x the sampled words.
    _, alpha_matrix, x = ngram_data(
        words,
        alpha_only=alpha_only,
        compact=compact,
        display=display,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        rows_only=False,
        random_state=random_state,
        sort_by=sort_by,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words,
        caps=caps)
    # Polar axes: clockwise from 12 o'clock, no frame or radial labels.
    fig, pol_ax = plt.subplots(1, 1, figsize=(12, 12), subplot_kw=dict(projection='polar'))
    pol_ax.grid(color='#dfdfdf')  # Color the grid
    pol_ax.set_theta_zero_location('N')  # we start at the top
    pol_ax.set_theta_direction(-1)  # and go clockwise
    pol_ax.set_rlabel_position(0)
    pol_ax.set_axis_off()
    if median:
        # start marker
        if leaf_order is None:
            pol_ax.plot((0, 0), (1.98, 2.02), color='r')
        else:
            pol_ax.plot((0, 0), (0.98, 1.02), color='r')
    # Total count of the top (or bottom, when top is negative) words being displayed.
    if top < 0:
        sum_by_len = x.word.value_counts()[top:].sum()
    else:
        sum_by_len = x.word.value_counts()[:top].sum()
    sum_by_stem = alpha_matrix.word.T.sum()
    sum_of_sum = sum_by_stem.sum()
    qty_unique_ngrams = len(x.ngram.unique())
    if stem_order is None:
        # Whole-word (or n-gram) mode: a single-ring sunburst over value counts.
        if leaf_order is None:
            col = 'word'  # dealing with words
        else:
            col = 'ngram'  # partial stem and leaf, words are n-grams
        top = qty_unique_ngrams if top == 0 else top
        # We are dealing with words, then.
        # Scale counts so the displayed slice angles sum to a full circle.
        d = np.pi * 2 / sum_by_len
        if top < 0:
            normalized = x[col].value_counts()[top:] * d
        else:
            normalized = x[col].value_counts()[:top] * d
        if sort_by == 'alpha':
            normalized.sort_index(inplace=True, ascending=ascending)
        elif sort_by == 'count':
            normalized.sort_values(inplace=True, ascending=ascending)
        # elif sort_by == 'len':
        #
        plot_sunburst_level(normalized, ax=pol_ax, label=label, level=0.5 if hole else 0, ngram=ngram)
        if median:
            pol_ax.plot((np.pi, np.pi), (0.48, 2.02), color='r')
        if hole and statistics:
            # Center hole: show how many words the displayed slices cover, and their share.
            pol_ax.text(0, 0, '{} words\n{:.2f} %'.format(sum_by_len, sum_by_len / len(x.word) * 100),
                        size=20, ha='center', va='center')
        if top < 0:
            plt.figtext(.5, 0.95, 'Bottom {} {}s from'.format(abs(top), col), ha='center')
            pol_ax.set_title('{}'.format(filename), fontsize=8)
        else:
            plt.figtext(.5, 0.95, 'Top {} {}s from'.format(top, col), ha='center')
            pol_ax.set_title('{}'.format(filename), fontsize=8)
    else:
        # A variation of a stem-and-leaf polar plot
        # Inner ring: one slice per stem, angle proportional to the stem's total leaf count.
        d = np.pi * 2 / sum_of_sum
        normalized = alpha_matrix.word.T.sum() * d
        if sort_by == 'alpha':
            normalized.sort_index(inplace=True, ascending=ascending)
        elif sort_by == 'count':
            # default is alpha
            normalized.sort_values(inplace=True, ascending=ascending)
            # sum_by_stem.sort_values(inplace=True, ascending=ascending)
        hole_adjust = 0.4 if hole else 0
        values = plot_sunburst_level(normalized, ax=pol_ax, label=label, level=hole_adjust,
                                     ngram=ngram, plot=stem_order)
        if hole and statistics:
            pol_ax.text(0, 0, '{:.2f} %'.format(sum_of_sum / len(x.word) * 100), size=12, ha='center',
                        va='center')
        if leaf_order is not None:
            # Outer ring: subdivide each stem slice by its leaves, starting at the stem's angle.
            stems = list(normalized.index)
            for i, stem in enumerate(stems):
                try:
                    leaves = alpha_matrix.word.T[stem].fillna(0) * d
                    if sort_by == 'count':
                        leaves.sort_values(inplace=True, ascending=ascending)
                    plot_sunburst_level(leaves, offset=values[i],
                                        level=1 if stem_order else hole_adjust, ax=pol_ax,
                                        ngram=ngram,
                                        stem=stem, vis=0.001)
                except KeyError:
                    # Stem with no leaf data in the matrix: skip its outer ring.
                    pass
            if stem_order:
                plt.figtext(.5, 0.95, 'Stem-and-leaves from', ha='center')
                pol_ax.set_title(filename, fontsize=8)
                if median:
                    pol_ax.plot((np.pi, np.pi), (0, 1.02), color='r')
            else:
                plt.figtext(.5, 0.95, 'Leaves from', ha='center')
                pol_ax.set_title(filename, fontsize=8)
                if median:
                    pol_ax.plot((np.pi, np.pi), (0, 1.02), color='r')
        else:
            plt.figtext(.5, 0.95, 'Stems from', ha='center')
            pol_ax.set_title(filename, fontsize=8)
            if median:
                pol_ax.plot((np.pi, np.pi), (0, 1.02), color='r')
    return pol_ax, x
# noinspection PyPep8Naming,PyTypeChecker,PyTypeChecker
def word_freq_plot(src, alpha_only=False, ascending=False, asFigure=False, caps=False, display=None,  # NOQA
                   interactive=True, kind='barh', random_state=None, sort_by='count', stop_words=None, top=100):
    """ word frequency bar chart.
    This function creates a classical word frequency bar chart.
    :param src: Either a filename including path, a url or a ready to process text in a dataframe or a tokenized format.
    :param alpha_only: words only if True, words and numbers if False
    :param ascending: stem sorted in ascending order, defaults to False
    :param asFigure: if interactive, the function will return a plotly figure instead of a matplotlib ax
    :param caps: keep capitalization (True, False)
    :param display: if specified, sample that quantity of words
    :param interactive: interactive graphic (True, False)
    :param kind: horizontal bar chart (barh) - also 'bar', 'area', 'hist' and non interactive 'kde' and 'pie'
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: default to 'count', can also be 'alpha'
    :param stop_words: a list of words to ignore
    :param top: how many different words to count by order frequency. If negative, this will be the least frequent
    :return: text as dataframe and plotly figure or matplotlib ax
    """
    # Whole-word mode: stem_order/leaf_order of None disables stem-and-leaf splitting.
    _, _, x = ngram_data(
        src,
        alpha_only=alpha_only,
        caps=caps,
        compact=True,
        display=display,
        leaf_order=None,
        rows_only=False,
        random_state=random_state,
        sort_by=sort_by,
        stem_order=None,
        stop_words=stop_words
    )
    # Second pass stop-word filter on the sampled frame (ngram_data also receives stop_words).
    if stop_words is not None:
        x = x[~x.word.isin(stop_words)]
    # if sort_by == 'alpha':
    #    x.sort_values(by='word', inplace=True, ascending=ascending)
    # elif sort_by == 'count':
    #    x.word.value_counts().sort_values(ascending=ascending, inplace=True)
    # elif sort_by == 'len':
    #    x = x[x.word.str.len().sort_values().index]
    if isinstance(src, str):
        if top < 0:
            title = 'Bottom {} word frequency for {}'.format(min(len(x.word.value_counts()), abs(top)), src)
        else:
            title = 'Top {} word frequency for {}'.format(min(len(x.word.value_counts()), top), src)
    else:
        title = 'word frequency'
    if interactive:
        try:
            # .iplot exists only when cufflinks has patched pandas; AttributeError -> fallback.
            if top < 0:
                figure = x.word.value_counts().sort_values(ascending=ascending)[top:].iplot(kind=kind,
                                                                                            asFigure=asFigure,
                                                                                            title=title)
            else:
                if sort_by == 'alpha':
                    figure = x.word.value_counts()[:top].sort_index(ascending=ascending).iplot(kind=kind,
                                                                                               asFigure=asFigure,
                                                                                               title=title)
                else:
                    figure = x.word.value_counts()[:top].sort_values(ascending=ascending).iplot(kind=kind,
                                                                                                asFigure=asFigure,
                                                                                                title=title)
        except AttributeError:
            warn('Interactive plot requested, but cufflinks not loaded. Falling back to matplotlib.')
            plt.figure(figsize=(20, 20))
            # NOTE(review): this fallback branch ignores `ascending` for top < 0 and does not
            # re-sort - slightly different ordering than the non-interactive branch below.
            if top < 0:
                ax = x.word.value_counts()[top:].plot(kind=kind, title=title)
            else:
                if sort_by == 'alpha':
                    ax = x.word.value_counts()[:top].sort_index(ascending=ascending).plot(kind=kind, title=title)
                else:
                    ax = x.word.value_counts()[:top].sort_values(ascending=ascending).plot(kind=kind, title=title)
            figure = ax  # special case, requested interactive, but unavailable, so return matplotlib ax
    else:
        plt.figure(figsize=(20, 20))
        if top < 0:
            ax = x.word.value_counts()[top:].sort_values(ascending=ascending).plot(kind=kind, title=title)
        else:
            if sort_by == 'alpha':
                ax = x.word.value_counts()[:top].sort_index(ascending=ascending).plot(kind=kind, title=title)
            else:
                ax = x.word.value_counts()[:top].sort_values(ascending=ascending).plot(kind=kind, title=title)
    # noinspection PyUnboundLocalVariable,PyUnboundLocalVariable
    return x, figure if interactive else ax
def word_radar(word, comparisons, ascending=True, display=100, label=True, metric=None,
               min_distance=1, max_distance=None, random_state=None, sort_by='alpha'):
    """ word_radar
    Radar plot centered on a reference word. Thin wrapper forwarding every option to
    ``radar`` - see that function for the full chart description.
    :param word: string, the reference word that will be placed in the middle
    :param comparisons: external file, list or string or dataframe of words
    :param ascending: bool if the sort is ascending
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param label: bool if True display words centered at coordinate
    :param metric: any metric function accepting two values and returning that metric in a range from 0 to x
    :param min_distance: minimum distance based on metric to include a word for display
    :param max_distance: maximum distance based on metric to include a word for display
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: default to 'alpha', can also be 'len'
    :return:
    """
    # Gather the pass-through options and delegate to the generic radar implementation.
    radar_options = dict(
        ascending=ascending,
        display=display,
        label=label,
        metric=metric,
        min_distance=min_distance,
        max_distance=max_distance,
        random_state=random_state,
        sort_by=sort_by,
    )
    return radar(word, comparisons, **radar_options)
def word_scatter(src1, src2, src3=None, alpha=0.5, alpha_only=True, ascending=True, asFigure=False, ax=None, caps=False,
                 compact=True, display=None, fig_xy=None, interactive=True, jitter=False, label=False,
                 leaf_order=None, leaf_skip=0, log_scale=True, normalize=None, percentage=None, random_state=None,
                 sort_by='alpha', stem_order=None, stem_skip=0, stop_words=None, whole=False):
    """ word_scatter
    Compare the word frequency of two (optionally three) sources on a scatter plot: each data
    point is a word (or stem-and-leaf value), with its count in the first source on the X axis
    and its count in the second source on the Y axis. Points are colored by which source the
    word is more common in (or a third color when counts are equal). In interactive mode,
    hovering shows exact counts and the word, and the legend filters by category.
    Thin wrapper forwarding every option to ``scatter``.
    :param src1: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src2: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param src3: string, filename, url, list, numpy array, time series, pandas or dask dataframe, optional
    :param alpha: opacity of the bars, median and outliers, defaults to 50%
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to True
    :param asFigure: return plot as plotly figure (for web applications)
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to True
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param fig_xy: tuple for matplotlib figsize, defaults to (20,20)
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param jitter: random noise added to help see multiple data points sharing the same coordinate
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to None (whole words)
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param log_scale: bool if True (default) uses log scale axes
    :param normalize: normalization factor, or None
    :param percentage: display as percentage, or None
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param stem_order: how many stem characters per data point to display, defaults to None (whole words)
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param whole: for normalized or percentage, use whole integer values (round)
    :return: matplotlib polar ax, dataframe
    """
    # Collect the pass-through options once, then delegate to the generic scatter implementation.
    scatter_options = dict(
        alpha=alpha,
        alpha_only=alpha_only,
        asFigure=asFigure,
        ascending=ascending,
        ax=ax,
        caps=caps,
        compact=compact,
        display=display,
        fig_xy=fig_xy,
        interactive=interactive,
        jitter=jitter,
        label=label,
        leaf_order=leaf_order,
        leaf_skip=leaf_skip,
        log_scale=log_scale,
        normalize=normalize,
        percentage=percentage,
        random_state=random_state,
        sort_by=sort_by,
        stem_order=stem_order,
        stem_skip=stem_skip,
        stop_words=stop_words,
        whole=whole,
    )
    return scatter(src1=src1, src2=src2, src3=src3, **scatter_options)
def word_sunburst(words, alpha_only=True, ascending=False, caps=False, compact=True, display=None, hole=True,
                  label=True, leaf_order=None, leaf_skip=0, median=True, ngram=True, random_state=None, sort_by='alpha',
                  statistics=True, stem_order=None, stem_skip=0, stop_words=None, top=40):
    """ word_sunburst
    Word based sunburst chart. Thin convenience wrapper that forwards every
    option to sunburst - see sunburst for full details.
    :param words: string, filename, url, list, numpy array, time series, pandas or dask dataframe
    :param alpha_only: only use stems from a-z alphabet (NA on dataframe)
    :param ascending: stem sorted in ascending order, defaults to True
    :param caps: bool, True to be case sensitive, defaults to False, recommended for comparisons.(NA on dataframe)
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param hole: bool if True (default) leave space in middle for statistics
    :param label: bool if True display words centered at coordinate
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param leaf_skip: how many leaf characters to skip, defaults to 0 - useful w/shared bigrams: 'wol','wor','woo'
    :param median: bool if True (default) display an origin and a median mark
    :param ngram: bool if True (default) display full n-gram as leaf label
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param statistics: bool if True (default) displays statistics in center - hole has to be True
    :param sort_by: sort by 'alpha' (default) or 'count'
    :param stem_order: how many stem characters per data point to display, defaults to 1
    :param stem_skip: how many stem characters to skip, defaults to 0 - useful to zoom in on a single root letter
    :param stop_words: stop words to remove. None (default), list or builtin EN (English), ES (Spanish) or FR (French)
    :param top: how many different words to count by order frequency. If negative, this will be the least frequent
    :return:
    """
    # Collect every forwarded option once, then delegate in a single call.
    opts = dict(alpha_only=alpha_only, ascending=ascending, caps=caps, compact=compact,
                display=display, hole=hole, label=label, leaf_order=leaf_order,
                leaf_skip=leaf_skip, median=median, ngram=ngram, random_state=random_state,
                sort_by=sort_by, statistics=statistics, stem_order=stem_order,
                stem_skip=stem_skip, stop_words=stop_words, top=top)
    return sunburst(words, **opts)
| mit |
c-m/Licenta | src/nn.py | 1 | 7522 | # neural network models for data learning
import matplotlib.pyplot as plt
import numpy as np
from data_loader import load_grades, preprocess_data
from sklearn.metrics import confusion_matrix, hamming_loss, brier_score_loss, log_loss
from sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.preprocessing import LabelEncoder
# Class display names for the binary task (pass/fail on the final grade).
LABEL_NAMES_BIN = ['failed', 'passed']
# Class display names for the 4-way task (exam grade bucketed per unit interval).
LABEL_NAMES_MULT = ['0-1', '1-2', '2-3', '3-4']
def plot_confusion_matrix(cm, label_names, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render a confusion matrix as a colour-mapped image on the current axes.

    Axis ticks are labelled with label_names; the caller is responsible for
    creating the figure beforehand and calling plt.show() afterwards.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(label_names))
    plt.xticks(positions, label_names, rotation=45)
    plt.yticks(positions, label_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def model_eval(clf, X_nn, Y_nn, X_nn_test, Y_nn_test, label_names):
prob = clf.predict_proba(X_nn_test)
print prob
_y_test = clf.predict(X_nn_test)
print Y_nn_test
print _y_test
#analyze how good/bad is the model
#reference: http://scikit-learn.org/dev/modules/model_evaluation.html
#model score (same as accuracy_score from sklearn.metrics module)
print("Training set score: %f" % clf.score(X_nn, Y_nn))
print("Test set score: %f" % clf.score(X_nn_test, Y_nn_test))
#hamming loss
hloss = hamming_loss(Y_nn_test, _y_test)
print 'Hamming loss: %f' % hloss
#brier score loss
i = 0
t = []
for p in prob:
if Y_nn_test[i] == 0:
t.append(p[0])
else:
t.append(p[1])
i += 1
y_prob = np.array(t)
print y_prob
brier_loss = brier_score_loss(Y_nn_test, y_prob)
print 'Brier score loss (lower is better): %f' % brier_loss
#cross-entropy loss -> this is an important metric for ML models
#more on this: http://colah.github.io/posts/2015-09-Visual-Information/
#H(p(x)) = sum(p(x)*log(1/p(x))); H is the entropy function
#H_q(p) = sum(p*log(1/q))
#We are going to use the following formula for evaluating the model:
#Kullback-Leibler divergence - KL divergence of p with respect to q:
#D_q(p) = H_q(p) - H(p) = sum(p*log(1/q)) - sum(p*log(1/p)), for all x,
#where x is a random variable
#In our case, the second sum is zero, since p(x) = 0 or 1
#We have to sum all these divergences for all the test examples and,
#in the end, average them for the formula to be correct.
KL_div = -np.sum(np.log(y_prob)) / _y_test.shape
print 'KL divergence: %f' % KL_div
#Alternatively, we could use this function from sk-learn:
print log_loss(Y_nn_test, prob)
#confusion matrix
c_m = confusion_matrix(Y_nn_test, _y_test)
print 'Confusion matrix'
print c_m
plt.figure()
plot_confusion_matrix(c_m, label_names)
np.set_printoptions(precision=2)
c_m_normalized = c_m.astype('float') / c_m.sum(axis=1)[:, np.newaxis]
print 'Normalized confusion matrix'
print c_m_normalized
plt.figure()
plot_confusion_matrix(c_m_normalized, label_names)
plt.show()
#reset numpy output formatter
np.set_printoptions(edgeitems=3,infstr='inf',
linewidth=75, nanstr='nan', precision=8,
suppress=False, threshold=1000, formatter=None)
def regressor_eval(clf, X_nn, Y_nn, X_nn_test, Y_nn_test):
_y_train = clf.predict(X_nn)
_y_test = clf.predict(X_nn_test)
print Y_nn_test
print _y_test
print '----------Train----------'
print("Training set score == R_2 score: %f" % clf.score(X_nn, Y_nn))
print 'Mean absolute error: %f' % mean_absolute_error(Y_nn, _y_train)
mse = mean_squared_error(Y_nn, _y_train)
print 'Mean squared error: %f. RMSE: %f' % (mse, np.sqrt(mse))
print 'Median absolute error: %f' % median_absolute_error(Y_nn, _y_train)
#r2_score
#It provides a measure of how well future samples are likely to be predicted by the model.
#Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
#A constant model that always predicts the expected value of y, disregarding the input features,
#would get a R^2 score of 0.0. Source: http://scikit-learn.org/dev/modules/model_evaluation.html
print 'R_2 score: %f' % r2_score(Y_nn, _y_train)
print '----------Test----------'
print("Test set score == R_2 score: %f" % clf.score(X_nn_test, Y_nn_test))
print 'Mean absolute error: %f' % mean_absolute_error(Y_nn_test, _y_test)
mse = mean_squared_error(Y_nn_test, _y_test)
print 'Mean squared error: %f. RMSE: %f' % (mse, np.sqrt(mse))
print 'Median absolute error: %f' % median_absolute_error(Y_nn_test, _y_test)
print 'R_2 score: %f' % r2_score(Y_nn_test, _y_test)
def nn_binary_classifier(data):
    """Classify the dataset in two classes ('failed' and 'passed')
    using fully-connected neural networks.

    :param data: dict with 'train_data', 'test_data',
        'train_discrete_labels' and 'test_discrete_labels' entries
        (presumably as produced by preprocess_data - TODO confirm)
    """
    X_nn = data['train_data']
    X_nn_test = data['test_data']
    Y_nn = data['train_discrete_labels']
    Y_nn_test = data['test_discrete_labels']
    #transform Y_nn and Y_nn_test
    #NOTE(review): these assignments binarize the label arrays *in place*
    #(grade < 5 -> 0, grade >= 5 -> 1), so the arrays stored in the `data`
    #dict are mutated for the caller as well -- confirm this is intended.
    Y_nn[Y_nn < 5] = 0
    Y_nn[Y_nn >= 5] = 1
    Y_nn_test[Y_nn_test < 5] = 0
    Y_nn_test[Y_nn_test >= 5] = 1
    #'algorithm' is the pre-0.18 scikit-learn name of this parameter
    #(renamed to 'solver' in later releases)
    clf = MLPClassifier(algorithm='sgd',
                        alpha=1e-5,
                        activation='relu',
                        hidden_layer_sizes=(100,),
                        random_state=1,
                        max_iter=1000,
                        batch_size='auto',
                        learning_rate='constant',
                        learning_rate_init=0.001,
                        verbose=False)
    clf.fit(X_nn, Y_nn)
    #evaluate de trained model
    model_eval(clf, X_nn, Y_nn, X_nn_test, Y_nn_test, LABEL_NAMES_BIN)
def nn_classifier(data):
    """Classify students data in four classes based on the final
    exam grade, which is a real number between 0.0 and 4.0.

    :param data: dict with 'train_data', 'test_data' and the
        '*_continuous_labels' arrays (exam grade expected in column 1)
    """
    X_nn = data['train_data']
    X_nn_test = data['test_data']
    Y_nn = data['train_continuous_labels'][:,1]
    Y_nn_test = data['test_continuous_labels'][:,1]
    #transform Y_nn and Y_nn_test: bucket the real-valued grades into
    #integer classes by rounding up
    Y_nn = np.ceil(Y_nn)
    Y_nn_test = np.ceil(Y_nn_test)
    #Encode labels to values: 0,1,2,3
    le = LabelEncoder()
    #fit a single encoder on the union of both splits: the original code
    #called fit() twice, so the second call discarded the classes from the
    #first and transform(Y_nn) could fail on any label missing from the
    #test split
    le.fit(np.concatenate((Y_nn, Y_nn_test)))
    Y_nn = le.transform(Y_nn)
    Y_nn_test = le.transform(Y_nn_test)
    #'algorithm' is the pre-0.18 scikit-learn name of the 'solver' parameter
    clf = MLPClassifier(algorithm='adam',
                        alpha=1e-5,
                        activation='tanh',
                        hidden_layer_sizes=(100,),
                        random_state=0,
                        max_iter=500,
                        batch_size='auto',
                        learning_rate='constant',
                        learning_rate_init=0.001,
                        verbose=True)
    clf.fit(X_nn, Y_nn)
    #evaluate the trained model
    model_eval(clf, X_nn, Y_nn, X_nn_test, Y_nn_test, LABEL_NAMES_MULT)
def nn_regressor(data):
    """Fit an MLPRegressor on the students data and print its error metrics.

    The exam grade is a real value between 0.0 and 4.0 and is used directly
    as the continuous regression target.
    """
    X_train = data['train_data']
    X_test = data['test_data']
    y_train = data['train_continuous_labels'][:, 1]
    y_test = data['test_continuous_labels'][:, 1]
    reg = MLPRegressor(algorithm='sgd', alpha=1e-5, activation='tanh',
                       hidden_layer_sizes=(100,), random_state=1,
                       max_iter=500, batch_size='auto',
                       learning_rate='constant', learning_rate_init=0.001,
                       verbose=True)
    reg.fit(X_train, y_train)
    #report train/test regression metrics for the fitted model
    regressor_eval(reg, X_train, y_train, X_test, y_test)
def main():
    """Entry point: load and preprocess the grades dataset, then run the
    binary classification experiment.

    The multi-class and regression experiments are kept below, commented
    out, so they can be toggled on individually.
    """
    dataset = load_grades()
    dataset = preprocess_data(dataset, poly_features=True)
    #binary classification based on final grade
    nn_binary_classifier(dataset)
    #4-class classification based on exam grade
    # nn_classifier(dataset)
    #regression for exam_grades using MLPRegressor() class
    #nn_regressor(dataset)
if __name__ == '__main__':
    main()
| mit |
bkendzior/scipy | scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
# Explicit public API of this module.
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
           'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
    """
    NearestNDInterpolator(points, values)
    Nearest-neighbour interpolation in N dimensions.
    .. versionadded:: 0.9
    Methods
    -------
    __call__
    Parameters
    ----------
    x : (Npoints, Ndims) ndarray of floats
        Data point coordinates.
    y : (Npoints,) ndarray of float or complex
        Data values.
    rescale : boolean, optional
        Rescale points to unit cube before performing interpolation.
        This is useful if some of the input dimensions have
        incommensurable units and differ by many orders of magnitude.
        .. versionadded:: 0.14.0
    tree_options : dict, optional
        Options passed to the underlying ``cKDTree``.
        .. versionadded:: 0.17.0
    Notes
    -----
    Uses ``scipy.spatial.cKDTree``
    """
    def __init__(self, x, y, rescale=False, tree_options=None):
        # The base class only validates/normalizes the coordinates here;
        # nearest-neighbour needs no value preprocessing, so the values are
        # stored as given.
        NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
                                    need_contiguous=False,
                                    need_values=False)
        opts = dict() if tree_options is None else tree_options
        self.tree = cKDTree(self.points, **opts)
        self.values = y
    def __call__(self, *args):
        """
        Evaluate interpolator at given points.
        Parameters
        ----------
        xi : ndarray of float, shape (..., ndim)
            Points where to interpolate data at.
        """
        query_points = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
        query_points = self._scale_x(self._check_call_shape(query_points))
        _, nearest = self.tree.query(query_points)
        return self.values[nearest]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
             rescale=False):
    """
    Interpolate unstructured D-dimensional data.
    Parameters
    ----------
    points : ndarray of floats, shape (n, D)
        Data point coordinates. Can either be an array of
        shape (n, D), or a tuple of `ndim` arrays.
    values : ndarray of float or complex, shape (n,)
        Data values.
    xi : ndarray of float, shape (M, D)
        Points at which to interpolate data.
    method : {'linear', 'nearest', 'cubic'}, optional
        Method of interpolation. One of
        ``nearest``
          return the value at the data point closest to
          the point of interpolation.  See `NearestNDInterpolator` for
          more details.
        ``linear``
          tesselate the input point set to n-dimensional
          simplices, and interpolate linearly on each simplex.  See
          `LinearNDInterpolator` for more details.
        ``cubic`` (1-D)
          return the value determined from a cubic
          spline.
        ``cubic`` (2-D)
          return the value determined from a
          piecewise cubic, continuously differentiable (C1), and
          approximately curvature-minimizing polynomial surface. See
          `CloughTocher2DInterpolator` for more details.
    fill_value : float, optional
        Value used to fill in for requested points outside of the
        convex hull of the input points.  If not provided, then the
        default is ``nan``. This option has no effect for the
        'nearest' method.
    rescale : bool, optional
        Rescale points to unit cube before performing interpolation.
        This is useful if some of the input dimensions have
        incommensurable units and differ by many orders of magnitude.
        .. versionadded:: 0.14.0
    Notes
    -----
    .. versionadded:: 0.9
    Examples
    --------
    Suppose we want to interpolate the 2-D function
    >>> def func(x, y):
    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
    on a grid in [0, 1]x[0, 1]
    >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
    but we only know its values at 1000 data points:
    >>> points = np.random.rand(1000, 2)
    >>> values = func(points[:,0], points[:,1])
    This can be done with `griddata` -- below we try out all of the
    interpolation methods:
    >>> from scipy.interpolate import griddata
    >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
    >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
    >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
    One can see that the exact result is reproduced by all of the
    methods to some degree, but for this smooth function the piecewise
    cubic interpolant gives the best results:
    >>> import matplotlib.pyplot as plt
    >>> plt.subplot(221)
    >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
    >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
    >>> plt.title('Original')
    >>> plt.subplot(222)
    >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
    >>> plt.title('Nearest')
    >>> plt.subplot(223)
    >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
    >>> plt.title('Linear')
    >>> plt.subplot(224)
    >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
    >>> plt.title('Cubic')
    >>> plt.gcf().set_size_inches(6, 6)
    >>> plt.show()
    """
    points = _ndim_coords_from_arrays(points)
    # Determine the dimensionality of the data: a flat coordinate array
    # means 1-D data, otherwise the last axis holds the coordinates.
    if points.ndim < 2:
        ndim = points.ndim
    else:
        ndim = points.shape[-1]
    # 1-D data is delegated to interp1d, which implements all three methods.
    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
        from .interpolate import interp1d
        points = points.ravel()
        if isinstance(xi, tuple):
            if len(xi) != 1:
                raise ValueError("invalid number of dimensions in xi")
            xi, = xi
        # Sort points/values together, necessary as input for interp1d
        idx = np.argsort(points)
        points = points[idx]
        values = values[idx]
        if method == 'nearest':
            # nearest-neighbour has no convex hull; fill_value is documented
            # to have no effect for this method, so always extrapolate
            fill_value = 'extrapolate'
        ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
                      fill_value=fill_value)
        return ip(xi)
    elif method == 'nearest':
        ip = NearestNDInterpolator(points, values, rescale=rescale)
        return ip(xi)
    elif method == 'linear':
        ip = LinearNDInterpolator(points, values, fill_value=fill_value,
                                  rescale=rescale)
        return ip(xi)
    elif method == 'cubic' and ndim == 2:
        # Clough-Tocher cubic interpolation is only implemented for 2-D data
        ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
                                        rescale=rescale)
        return ip(xi)
    else:
        raise ValueError("Unknown interpolation method %r for "
                         "%d dimensional data" % (method, ndim))
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.