repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
nrhine1/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.

    # Fit the pipeline on the training set using grid search for the parameters

    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # NOTE(review): this is a teaching skeleton -- `y_predicted` only exists
    # once the TASK above is completed; until then the lines below raise
    # NameError by design.

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        # Reuters-21578 SGML files are latin-1 encoded by default
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # Dispatch to a start_<tag> handler if one exists, otherwise no-op
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)

    def handle_endtag(self, tag):
        # Dispatch to an end_<tag> handler if one exists, otherwise no-op
        method = 'end_' + tag
        getattr(self, method, lambda: None)()

    def _reset(self):
        # Flags tracking which SGML element the parser is currently inside
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        # Accumulators for the document currently being built
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""

    def parse(self, fd):
        """Feed the binary file object chunk by chunk, yielding parsed docs.

        end_reuters() appends completed documents to ``self.docs``; they are
        drained incrementally here so memory stays bounded.
        """
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        # Route character data to the accumulator of the currently open tag
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # Collapse runs of whitespace, then emit the finished document
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        self.in_topic_d = 0
        # A <D> element (inside <TOPICS>) carries one topic label
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        # Download the dataset.  (Fixed: this used to be a stray string
        # literal -- a no-op statement -- rather than a comment.)
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: print an in-place progress line
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        # Fixed: close the tar archive instead of leaking the file handle
        with tarfile.open(archive_path, 'r:gz') as archive:
            archive.extractall(data_path)
        print("done.")

    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        # Fixed: close each SGML file once it has been fully parsed
        with open(filename, 'rb') as sgml_file:
            for doc in parser.parse(sgml_file):
                yield doc
###############################################################################
# Main
###############################################################################

# Create the vectorizer and limit the number of features to a reasonable
# maximum.  The hashing trick keeps the feature space fixed across batches,
# which is what makes out-of-core learning possible here.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)

# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()

# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'

# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    batch = []
    for doc in itertools.islice(doc_iter, size):
        # Skip documents without any topic label
        if not doc['topics']:
            continue
        text = u'{title}\n\n{body}'.format(**doc)
        batch.append((text, pos_class in doc['topics']))
    if not batch:
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*batch)
    return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches; stops when the document stream is exhausted."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            break
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000

# Time the SGML parsing and the hashing vectorization separately; both
# durations are reported in the prediction-time bar plot at the end.
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick

tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick

test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    parts = ["%20s classifier : \t" % cls_name]
    parts.append("%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats)
    parts.append("%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats)
    parts.append("accuracy: %(accuracy).3f " % stats)
    parts.append("in %.2fs (%5d docs/s)" % (duration,
                                            stats['n_train'] / duration))
    return "".join(parts)
cls_stats = {}

# Per-classifier bookkeeping: counts, running accuracy and timing histories
for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents)
# Discard test set
# NOTE(review): this discards a further n_test_documents docs from the
# stream; the held-out test set itself was already taken above -- confirm
# whether skipping this extra batch is intentional.

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0

# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        # Accuracy as a function of number of training docs seen so far
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        # Accuracy as a function of cumulative wall-clock time spent
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        # Only report every third mini-batch to limit console noise
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x."""
    x_values = np.array(x)
    y_values = np.array(y)
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(x_values, y_values)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))

# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats['accuracy_history'])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats['runtime_history'])
    plot_accuracy(runtime, accuracy, 'runtime (s)')
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['total_fit_time'])

# Add the (shared) vectorization cost as an extra bar
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Attach a text label above each bar rectangle showing its height."""
    for rectangle in rectangles:
        bar_height = rectangle.get_height()
        x_center = rectangle.get_x() + rectangle.get_width() / 2.
        ax.text(x_center,
                1.05 * bar_height, '%.4f' % bar_height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()

# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
# Reset the label list: it still contains 'Vectorization' from above
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['prediction_time'])

# Add the one-off parsing and vectorization costs measured on the test set
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
spallavolu/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
    """Draw one labelled benchmark curve with circle markers."""
    plt.plot(X, y, marker='o', label=label)
def benchmark(estimator, data):
    """Fit ``estimator`` on ``data``; return fit time and reconstruction error.

    The error is the mean absolute difference between ``data`` and its
    round-trip through transform / inverse_transform.
    """
    gc.collect()
    print("Benching %s" % estimator)
    start = time()
    estimator.fit(data)
    elapsed = time() - start

    transformed = estimator.transform(data)
    reconstructed = estimator.inverse_transform(transformed)
    mean_abs_error = np.mean(np.abs(data - reconstructed))
    return {'time': elapsed, 'error': mean_abs_error}
def plot_feature_times(all_times, batch_size, all_components, data):
    """Plot fit time against n_components for each PCA variant."""
    plt.figure()
    curves = [('pca', "PCA"),
              ('ipca', "IncrementalPCA, bsize=%i" % batch_size),
              ('rpca', "RandomizedPCA")]
    for key, label in curves:
        plot_results(all_components, all_times[key], label=label)
    plt.legend(loc="upper left")
    plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
    """Plot reconstruction error against n_components for each PCA variant."""
    plt.figure()
    curves = [('pca', "PCA"),
              ('ipca', "IncrementalPCA, bsize=%i" % batch_size),
              ('rpca', "RandomizedPCA")]
    for key, label in curves:
        plot_results(all_components, all_errors[key], label=label)
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. n_components\n"
                 "LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
    """Plot fit time against IncrementalPCA batch size (flat PCA baselines)."""
    plt.figure()
    curves = [('pca', "PCA"),
              ('rpca', "RandomizedPCA"),
              ('ipca', "IncrementalPCA")]
    for key, label in curves:
        plot_results(all_batch_sizes, all_times[key], label=label)
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
        n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
    """Plot reconstruction error against IncrementalPCA batch size."""
    plt.figure()
    curves = [('pca', "PCA"),
              ('ipca', "IncrementalPCA")]
    for key, label in curves:
        plot_results(all_batch_sizes, all_errors[key], label=label)
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
        n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
    """Compare PCA variants over several n_components at a fixed batch size.

    Benchmarks PCA, RandomizedPCA and IncrementalPCA (batch_size=1000) for 5
    component counts spanning ~10% to 100% of the feature count, then plots
    runtimes and reconstruction errors.
    """
    all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
                                                       data.shape[1], num=5)]
    batch_size = 1000
    # Compare runtimes and error for fixed batch size
    all_times = defaultdict(list)
    all_errors = defaultdict(list)
    for n_components in all_features:
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('ipca', ipca),
                                                               ('rpca', rpca)]}

        for k in sorted(results_dict.keys()):
            all_times[k].append(results_dict[k]['time'])
            all_errors[k].append(results_dict[k]['error'])

    plot_feature_times(all_times, batch_size, all_features, data)
    plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
    """Compare IncrementalPCA across batch sizes against flat PCA baselines.

    For several n_components values, benchmarks PCA and RandomizedPCA once
    (constant baselines) and IncrementalPCA for 10 batch sizes spanning ~10%
    to 100% of the sample count, plotting runtime and error per setting.
    """
    batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
                                                      data.shape[0], num=10)]

    for n_components in [i.astype(int) for i in
                         np.linspace(data.shape[1] // 10,
                                     data.shape[1], num=4)]:
        all_times = defaultdict(list)
        all_errors = defaultdict(list)
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('rpca', rpca)]}

        # Create flat baselines to compare the variation over batch size
        all_times['pca'].extend([results_dict['pca']['time']] *
                                len(batch_sizes))
        all_errors['pca'].extend([results_dict['pca']['error']] *
                                 len(batch_sizes))
        all_times['rpca'].extend([results_dict['rpca']['time']] *
                                 len(batch_sizes))
        all_errors['rpca'].extend([results_dict['rpca']['error']] *
                                  len(batch_sizes))
        for batch_size in batch_sizes:
            ipca = IncrementalPCA(n_components=n_components,
                                  batch_size=batch_size)
            results_dict = {k: benchmark(est, data) for k, est in [('ipca',
                                                                   ipca)]}
            all_times['ipca'].append(results_dict['ipca']['time'])
            all_errors['ipca'].append(results_dict['ipca']['error'])

        plot_batch_times(all_times, n_components, batch_sizes, data)
        # RandomizedPCA error is always worse (approx 100x) than other PCA
        # tests
        plot_batch_errors(all_errors, n_components, batch_sizes, data)
# Fetch the LFW faces dataset (downloaded on first run)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]

# Standardize features (zero mean, unit variance) before the PCA benchmarks
X -= X.mean(axis=0)
X /= X.std(axis=0)

fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
heli522/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them appart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer that maps ``midpoint`` to the middle of the map.

    Utility to move the midpoint of a colormap to be around the values of
    interest (here, the best cross-validation scores).
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target

# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1  # relabel kept classes {1, 2} as {0, 1}

# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)

##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)

print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))

# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')

    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')

# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores (field 1 of each grid_scores_ entry)
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))

# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
plotly/python-api | packages/python/plotly/_plotly_utils/tests/validators/test_integer_validator.py | 2 | 4681 | # Array not ok
# ------------
import pytest
from pytest import approx
from _plotly_utils.basevalidators import IntegerValidator
import numpy as np
import pandas as pd
# ### Fixtures ###
@pytest.fixture()
def validator():
    # Unconstrained integer validator
    return IntegerValidator("prop", "parent")


@pytest.fixture
def validator_min_max():
    # Bounded validator: valid interval is [-1, 2]
    return IntegerValidator("prop", "parent", min=-1, max=2)


@pytest.fixture
def validator_min():
    # Lower bound only: valid values are >= -1
    return IntegerValidator("prop", "parent", min=-1)


@pytest.fixture
def validator_max():
    # Upper bound only: valid values are <= 2
    return IntegerValidator("prop", "parent", max=2)


@pytest.fixture
def validator_aok(request):
    # array_ok validator: accepts scalars and arrays, interval [-2, 10]
    return IntegerValidator("prop", "parent", min=-2, max=10, array_ok=True)
# ### Acceptance ###
@pytest.mark.parametrize("val", [1, -19, 0, -1234])
def test_acceptance(val, validator):
    # In-range plain integers pass through unchanged
    assert validator.validate_coerce(val) == val


# ### Rejection by value ###
@pytest.mark.parametrize(
    "val", ["hello", (), [], [1, 2, 3], set(), "34", np.nan, np.inf, -np.inf]
)
def test_rejection_by_value(val, validator):
    # Non-integers (including numeric strings and non-finite floats) raise
    with pytest.raises(ValueError) as validation_failure:
        validator.validate_coerce(val)

    assert "Invalid value" in str(validation_failure.value)
# ### With min/max ###
# min == -1 and max == 2
@pytest.mark.parametrize("val", [0, 1, -1, 2])
def test_acceptance_min_max(val, validator_min_max):
    # Values inside [-1, 2], including both endpoints, are accepted
    assert validator_min_max.validate_coerce(val) == approx(val)
@pytest.mark.parametrize(
    # Fixed: the `np.int` alias (of builtin int) was removed in NumPy 1.24;
    # np.int64 provides the same extreme values on the platforms tested.
    "val", [-1.01, -10, 2.1, 3, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_rejection_min_max(val, validator_min_max):
    # Out-of-interval or non-integral values raise with the interval message
    with pytest.raises(ValueError) as validation_failure:
        validator_min_max.validate_coerce(val)

    assert "in the interval [-1, 2]" in str(validation_failure.value)
# ### With min only ###
# min == -1
@pytest.mark.parametrize("val", [-1, 0, 1, 23, 99999])
def test_acceptance_min(val, validator_min):
    # Any value >= -1 is accepted when only a lower bound is set
    assert validator_min.validate_coerce(val) == approx(val)
# Fixed: `np.int` was removed in NumPy 1.24; use np.int64, which matches the
# int64 default upper bound asserted in the error message below.
@pytest.mark.parametrize("val", [-2, -123, np.iinfo(np.int64).min])
def test_rejection_min(val, validator_min):
    # Values below min=-1 are rejected; max defaults to the int64 upper bound
    with pytest.raises(ValueError) as validation_failure:
        validator_min.validate_coerce(val)

    assert "in the interval [-1, 9223372036854775807]" in str(validation_failure.value)
# ### With max only ###
# max == 2
@pytest.mark.parametrize("val", [1, 2, -10, -999999, np.iinfo(np.int32).min])
def test_acceptance_max(val, validator_max):
    # Any value <= 2 is accepted when only an upper bound is set
    assert validator_max.validate_coerce(val) == approx(val)


@pytest.mark.parametrize("val", [3, 10, np.iinfo(np.int32).max])
def test_rejection_max(val, validator_max):
    # Values above max=2 are rejected; min defaults to the int64 lower bound
    with pytest.raises(ValueError) as validation_failure:
        validator_max.validate_coerce(val)

    assert "in the interval [-9223372036854775808, 2]" in str(validation_failure.value)
# Array ok
# --------
# min=-2 and max=10
# ### Acceptance ###
@pytest.mark.parametrize("val", [-2, 1, 0, 1, 10])
def test_acceptance_aok_scalars(val, validator_aok):
    # array_ok validators still accept plain scalars
    assert validator_aok.validate_coerce(val) == val


@pytest.mark.parametrize("val", [[1, 0], [1], [-2, 1, 8], np.array([3, 2, -1, 5])])
def test_acceptance_aok_list(val, validator_aok):
    # Lists and ndarrays of in-range integers are accepted element-wise
    assert np.array_equal(validator_aok.validate_coerce(val), val)
# ### Coerce ###
# Coerced to general consistent numeric type
@pytest.mark.parametrize(
    "val,expected",
    [
        ([1, 0], (1, 0)),
        ((1, -1), (1, -1)),
        (np.array([-1, 0, 5.0], dtype="int16"), [-1, 0, 5]),
        (np.array([1, 0], dtype=np.int64), [1, 0]),
        (pd.Series([1, 0], dtype=np.int64), [1, 0]),
        (pd.Index([1, 0], dtype=np.int64), [1, 0]),
    ],
)
def test_coercion_aok_list(val, expected, validator_aok):
    v = validator_aok.validate_coerce(val)
    if isinstance(val, (np.ndarray, pd.Series, pd.Index)):
        # Array-likes keep their dtype and present as an int32 ndarray
        assert v.dtype == val.dtype
        assert np.array_equal(
            validator_aok.present(v), np.array(expected, dtype=np.int32)
        )
    else:
        # Plain sequences are coerced to list and presented as a tuple
        assert isinstance(v, list)
        assert validator_aok.present(v) == expected
# ### Rejection ###
#
@pytest.mark.parametrize("val", [["a", 4], [[], 3, 4]])
def test_integer_validator_rejection_aok(val, validator_aok):
with pytest.raises(ValueError) as validation_failure:
validator_aok.validate_coerce(val)
assert "Invalid element(s)" in str(validation_failure.value)
# ### Rejection by element ###
@pytest.mark.parametrize(
"val",
[[-1, 11], [1.5, -3], [0, np.iinfo(np.int32).max], [0, np.iinfo(np.int32).min]],
)
def test_rejection_aok_min_max(val, validator_aok):
with pytest.raises(ValueError) as validation_failure:
validator_aok.validate_coerce(val)
assert "in the interval [-2, 10]" in str(validation_failure.value)
| mit |
yask123/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.
    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.
    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.
    Read more in the :ref:`User Guide <pipeline>`.
    Parameters
    ----------
    steps : list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.
    Attributes
    ----------
    named_steps : dict
        Read-only attribute to access any step parameter by user given name.
        Keys are step names and values are steps parameters.
    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ... # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)  # doctest: +ELLIPSIS
    0.77...
    >>> # getting the selected features chosen by anova_filter
    >>> anova_svm.named_steps['anova'].get_support()
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True, False, False,  True, False,  True,  True, True,
           False, False,  True, False,  True, False, False, False, False,
           True], dtype=bool)
    """
    # BaseEstimator interface
    def __init__(self, steps):
        # `steps` is a list of (name, estimator) pairs; names must be unique
        # because they key parameter routing ('<name>__<param>').
        names, estimators = zip(*steps)
        if len(dict(steps)) != len(steps):
            raise ValueError("Provided step names are not unique: %s" % (names,))
        # shallow copy of steps
        self.steps = tosequence(steps)
        transforms = estimators[:-1]
        estimator = estimators[-1]
        # Every step except the last must behave like a transformer
        # (fit/fit_transform plus transform); the last only needs fit.
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform"
                                " '%s' (type %s) doesn't)" % (t, type(t)))
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "'%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))
    @property
    def _estimator_type(self):
        # The pipeline is a classifier/regressor/etc. according to its
        # final estimator.
        return self.steps[-1][1]._estimator_type
    def get_params(self, deep=True):
        """Return estimator parameters; with deep=True also include each
        step's own parameters under '<step_name>__<param>' keys."""
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            # named_steps builds a fresh dict each call, so extending `out`
            # in place does not mutate self.steps.
            out = self.named_steps
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            out.update(super(Pipeline, self).get_params(deep=False))
            return out
    @property
    def named_steps(self):
        # Read-only mapping of step name -> step estimator.
        return dict(self.steps)
    @property
    def _final_estimator(self):
        # The last (name, estimator) pair's estimator.
        return self.steps[-1][1]
    # Estimator interface
    def _pre_transform(self, X, y=None, **fit_params):
        """Fit+transform all but the last step and split **fit_params into
        per-step dicts using the '<step>__<param>' naming convention.
        Returns the transformed X and the fit params for the final step."""
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]
    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict.
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def fit_predict(self, X, y=None, **fit_params):
        """Applies fit_predict of last step in pipeline after transforms.
        Applies fit_transforms of a pipeline to the data, followed by the
        fit_predict method of the final estimator in the pipeline. Valid
        only if the final estimator implements fit_predict.
        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of
            the pipeline.
        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps
            of the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba.
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function.
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator. Valid only if the final estimator implements
        predict_log_proba.
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)
    @if_delegate_has_method(delegate='_final_estimator')
    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform.
        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        # Note: unlike predict & co., this loops over *all* steps, including
        # the final estimator's transform.
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt
    @if_delegate_has_method(delegate='_final_estimator')
    def inverse_transform(self, X):
        """Applies inverse transform to the data.
        Starts with the last step of the pipeline and applies ``inverse_transform`` in
        inverse order of the pipeline steps.
        Valid only if all steps of the pipeline implement inverse_transform.
        Parameters
        ----------
        X : iterable
            Data to inverse transform. Must fulfill output requirements of the
            last step of the pipeline.
        """
        # Legacy behavior: promote 1d input to a single-sample 2d array.
        if X.ndim == 1:
            warn("From version 0.19, a 1d X will not be reshaped in"
                 " pipeline.inverse_transform any more.", FutureWarning)
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt
    @if_delegate_has_method(delegate='_final_estimator')
    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score.
        Parameters
        ----------
        X : iterable
            Data to score. Must fulfill input requirements of first step of the
            pipeline.
        y : iterable, default=None
            Targets used for scoring. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].score(Xt, y)
    @property
    def classes_(self):
        # Delegate class labels to the final estimator.
        return self.steps[-1][-1].classes_
    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Construct a Pipeline from the given estimators.
    This is a shorthand for the Pipeline constructor; it does not require, and
    does not permit, naming the estimators. Instead, they will be given names
    automatically based on their types.
    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_pipeline(StandardScaler(), GaussianNB())    # doctest: +NORMALIZE_WHITESPACE
    Pipeline(steps=[('standardscaler',
                     StandardScaler(copy=True, with_mean=True, with_std=True)),
                    ('gaussiannb', GaussianNB())])
    Returns
    -------
    p : Pipeline
    """
    # Auto-name each step from its class, then delegate to the constructor.
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.
    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.
    Read more in the :ref:`User Guide <feature_union>`.
    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
    def get_feature_names(self):
        """Get feature names from all transformers.
        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        # Each transformer's feature names are prefixed with '<name>__'.
        feature_names = []
        for name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names
    def fit(self, X, y=None):
        """Fit all transformers using X.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        # Fit in parallel, then swap the fitted clones back into
        # transformer_list (joblib may return copies from worker processes).
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # If any block is sparse, the stacked result must be sparse too.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def get_params(self, deep=True):
        """Return parameters; with deep=True also include each transformer's
        own parameters under '<name>__<param>' keys."""
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                for key, value in iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            out.update(super(FeatureUnion, self).get_params(deep=False))
            return out
    def _update_transformer_list(self, transformers):
        # Replace each stored transformer with its (possibly re-fitted) twin,
        # preserving names and order; slice assignment mutates in place.
        self.transformer_list[:] = [
            (name, new)
            for ((name, old), new) in zip(self.transformer_list, transformers)
        ]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
    """Construct a FeatureUnion from the given transformers.
    This is a shorthand for the FeatureUnion constructor; it does not require,
    and does not permit, naming the transformers. Instead, they will be given
    names automatically based on their types. It also does not allow weighting.
    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_union(PCA(), TruncatedSVD())    # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
                 transformer_list=[('pca', PCA(copy=True, n_components=None,
                                               whiten=False)),
                                   ('truncatedsvd',
                                    TruncatedSVD(algorithm='randomized',
                                                 n_components=2, n_iter=5,
                                                 random_state=None, tol=0.0))],
                 transformer_weights=None)
    Returns
    -------
    f : FeatureUnion
    """
    # Auto-name each transformer from its class, then delegate.
    named_transformers = _name_estimators(transformers)
    return FeatureUnion(named_transformers)
| bsd-3-clause |
ralbayaty/KaggleRetina | testing/circleDetect.py | 1 | 1170 | import numpy as np
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))
# Detect two radii
# Candidate radii in pixels: 15, 17, ..., 29; one accumulator per radius.
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
    # For each radius, extract two circles
    num_peaks = 2
    # Each accumulator peak is a candidate circle center (row, col).
    peaks = peak_local_max(h, num_peaks=num_peaks)
    centers.extend(peaks)
    accums.extend(h[peaks[:, 0], peaks[:, 1]])
    radii.extend([radius] * num_peaks)
# Draw the most prominent 5 circles
image = color.gray2rgb(image)
for idx in np.argsort(accums)[::-1][:5]:
    # NOTE(review): peaks are (row, col); the names center_x/center_y appear
    # swapped, but circle_perimeter(center_y, center_x, ...) re-swaps them,
    # so the drawn coordinates come out consistent — confirm before renaming.
    center_x, center_y = centers[idx]
    radius = radii[idx]
    cx, cy = circle_perimeter(center_y, center_x, radius)
    # Paint the circle outline red on the RGB image.
    image[cy, cx] = (220, 20, 20)
ax.imshow(image, cmap=plt.cm.gray) | gpl-2.0 |
gahoo/SNAP | core/gantt.py | 1 | 6208 | import dash
from dash.dependencies import Input, Output, State
from core.misc import *
from core import models
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
from collections import defaultdict
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import plotly.figure_factory as ff
import plotly.graph_objs as go
import json
import pandas as pd
import numpy as np
import colorlover as cl
import plotly
import os
import pdb
import itertools
# Mapping of project name -> sqlite db file path, read from the user config.
db = loadYaml(os.path.expanduser("~/.snap/db.yaml"))
app = dash.Dash()
app.scripts.config.serve_locally = True
# Shared inline style for the dropdown control containers in the header row.
ctrl_style = {'width': '25%', 'display': 'inline-block', 'margin': '1'}
def new_session(name, dbfile):
    """Open a fresh SQLAlchemy session on the given sqlite database file.

    NOTE(review): `name` is unused here — presumably kept to mirror the
    load_project call signature; confirm before removing.
    """
    engine = create_engine('sqlite:///' + dbfile)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
def load_project(name):
    """Load the Project row called `name` from its configured database and
    attach a live session and a per-project logger to it."""
    session = new_session(name, db[name])
    project = session.query(models.Project).filter_by(name=name).one()
    project.session = session
    project.logger = new_logger(name, new_log_file_handler(db[name]))
    return project
def make_options(opts):
    """Build Dash dropdown/checklist options from an iterable of values.

    Returns a real list (not a lazy ``map`` object) so the result is
    identical on Python 2 and Python 3 and can be consumed more than once;
    the original ``map(lambda ...)`` produced a one-shot iterator under
    Python 3.
    """
    return [{'label': opt, 'value': opt} for opt in opts]
# Page layout: a header row of filter controls, the Gantt graph, and a
# checkbox that toggles the boxplot view. Control ids here are wired to the
# callbacks below (project_id, mode, modules, apps, show, boxplot).
app.layout = html.Div([
    html.H4('Project Gantt'),
    html.Div([
        html.Div([
            html.Label('Show'),
            dcc.Checklist(
                options = make_options(['waited', 'elapsed']),
                values = ['elapsed'],
                id = 'show')
        ], style = {'width': '14%', 'float': 'left', 'margin': '1'}),
        html.Div([
            html.Label('Project'),
            dcc.Dropdown(
                # NOTE(review): db.keys() is a list on Python 2; on Python 3
                # a view object is passed — confirm dcc accepts it.
                options = make_options(db.keys()),
                id = 'project_id')
        ], style = ctrl_style),
        html.Div([
            html.Label('Mode'),
            dcc.Dropdown(
                options = make_options(['Module', 'App', 'Task', 'Instance']),
                value = 'Module',
                id = 'mode')
        ], style = {'width': '10%', 'display': 'inline-block', 'margin': '1'}),
        html.Div([
            html.Label('Modules'),
            dcc.Dropdown(
                multi=True,
                id = 'modules')
        ], style = ctrl_style),
        html.Div([
            html.Label('Apps'),
            dcc.Dropdown(
                multi=True,
                id = 'apps')
        ], style = ctrl_style),
    ]),
    html.Hr(),
    dcc.Graph(
        id='graph-gantt'
    ),
    dcc.Checklist(
        options = make_options(['boxplot']),
        values = [],
        id = 'boxplot',
        style = {'float': 'right'}),
], className="container")
@app.callback(
    Output('modules', 'options'),
    [Input('project_id', 'value')]
)
def set_modules_options(project_id):
    """Fill the Modules dropdown with all module names of the chosen project."""
    project = load_project(project_id)
    module_names = [module.name
                    for module in project.session.query(models.Module).all()]
    return make_options(module_names)
@app.callback(
    Output('apps', 'options'),
    [Input('project_id', 'value'),
    Input('modules', 'value'),]
)
def set_apps_options(project_id, modules):
    """Fill the Apps dropdown, restricted to the selected modules if any."""
    project = load_project(project_id)
    all_apps = project.session.query(models.App).all()
    if modules:
        all_apps = [one_app for one_app in all_apps
                    if one_app.module.name in modules]
    return make_options([one_app.name for one_app in all_apps])
@app.callback(
    Output('graph-gantt', 'figure'),
    [
        Input('project_id', 'value'),
        Input('mode', 'value'),
        Input('modules', 'value'),
        Input('apps', 'value'),
        Input('show', 'values'),
        Input('boxplot', 'values'),
    ])
def update_figure(project_id, mode, modules, apps, show, boxplot):
    """Rebuild the Gantt (or boxplot) figure whenever any control changes.

    WARNING: Python 2 semantics — filter()/map() below return lists and
    dict.iteritems() is used; do not port to Python 3 without rewriting.
    """
    def pick_job(jobs):
        # Keep only completed jobs and take the most recent one (if any).
        jobs = filter(lambda x:x.status in ['Finished', 'Imported'], jobs)
        return jobs[-1] if jobs else None
    def filter_jobs(jobs):
        # Drop tasks without a completed job, then apply module/app filters.
        jobs = filter(lambda x:x is not None, jobs)
        if modules:
            jobs = filter(lambda x:x.task.module.name in modules, jobs)
        if apps:
            jobs = filter(lambda x:x.task.app.name in apps, jobs)
        return jobs
    def build_task(job):
        # One row of the Gantt chart: label plus start/finish timestamps,
        # read via the attribute names chosen by choose_start_finish().
        return {'Task': get_task_name[mode](job), 'Start':job.__getattribute__(job_start), 'Finish':job.__getattribute__(job_finish)}
    # Row-label extractor per display mode.
    get_task_name = {
        'Task': lambda x:os.path.basename(x.task.shell),
        'App': lambda x:x.task.app.name,
        'Module': lambda x:x.task.module.name,
        'Instance': lambda x:x.instance.name}
    def choose_start_finish():
        # 'waited' starts bars at job creation; 'elapsed' ends them at finish.
        # Note: the last two branches intentionally coincide (default view).
        if 'waited' in show and 'elapsed' in show:
            return ('create_date', 'finish_date')
        elif 'waited' in show and 'elapsed' not in show:
            return ('create_date', 'start_date')
        elif 'waited' not in show and 'elapsed' in show:
            return ('start_date', 'finish_date')
        elif 'waited' not in show and 'elapsed' not in show:
            return ('start_date', 'finish_date')
    def pick_start(element):
        return element['Start']
    def prepare_gantt(df):
        return ff.create_gantt(df, group_tasks=True)
    def make_task_trace(name, value):
        return {
            'name': name,
            'type': 'box',
            'x': value
        }
    def add_task_time(job):
        # Accumulate per-task durations in hours into task_time.
        diff_time = job['Finish'] - job['Start']
        task_time[job['Task']].append(diff_time.total_seconds() / 3600.0)
    def prepare_boxplot(df):
        # Python 2: map() is eager, so this populates task_time as a side
        # effect; under Python 3 this line would silently do nothing.
        map(add_task_time, df)
        data = [make_task_trace(name, value) for name, value in task_time.iteritems()]
        fig = go.Figure(data=data)
        fig['layout']['showlegend'] = False
        return fig
    proj = load_project(project_id)
    jobs = [pick_job(t.bcs) for t in proj.task if t.bcs]
    jobs = filter_jobs(jobs)
    job_start, job_finish = choose_start_finish()
    df = map(build_task, jobs)
    df = sorted(df, key=pick_start)
    task_time = defaultdict(list)
    if 'boxplot' in boxplot:
        fig = prepare_boxplot(df)
    else:
        fig = prepare_gantt(df)
    # Size the figure so every row fits and long labels get a wide margin.
    # NOTE(review): max() raises ValueError when df is empty — confirm the
    # callbacks guarantee at least one job before relying on this.
    max_nchar = max([len(i['Task']) for i in df])
    fig['layout']['height'] = 18 * len(set([i['Task'] for i in df])) + 200
    fig['layout']['margin'] = {
        'l': 8 * max_nchar,
        'r': 10,
        't': 40,
        'b': 80
    }
    return fig
| mit |
ahmadia/bokeh | bokeh/charts/builder/line_builder.py | 43 | 5360 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
import numpy as np
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line as LineGlyph
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Line(values, index=None, **kws):
    """ Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
    render the geometry from values and index.
    Args:
        values (iterable): iterable 2d representing the data series
            values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as an **1d iterable** of any sort that will be used as
            series common index or a **string** that corresponds to the key of the
            mapping to be used as index (and not as data series) if
            area.values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)
    In addition the the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`
    Examples:
    .. bokeh-plot::
        :source-position: above
        import numpy as np
        from bokeh.charts import Line, output_file, show
        # (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
        xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
        line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
        output_file('line.html')
        show(line)
    """
    # Thin factory: all of the real work happens inside LineBuilder.
    chart = create_and_build(LineBuilder, values, index=index, **kws)
    return chart
class LineBuilder(Builder):
    """This is the Line class and it is in charge of plotting
    Line charts in an easy and intuitive way.
    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the source.
    """
    index = Any(help="""
    An index to be used for all data series as follows:
    - A 1d iterable of any sort that will be used as
        series common index
    - As a string that corresponds to the key of the
        mapping to be used as index (and not as data
        series) if area.values is a mapping (like a dict,
        an OrderedDict or a pandas DataFrame)
    """)
    def _process_data(self):
        """Calculate the chart properties accordingly from line.values.
        Then build a dict containing references to all the points to be
        used by the line glyph inside the ``_yield_renderers`` method.
        """
        # NOTE(review): set_and_get, _values, _values_index and _groups are
        # inherited from the Builder base class (not visible in this file) —
        # presumably set_and_get stores a column and records its key in
        # self._attr; confirm against bokeh.charts._builder.
        self._data = dict()
        # list to save all the attributes we are going to create
        self._attr = []
        xs = self._values_index
        self.set_and_get("x", "", np.array(xs))
        for col, values in self._values.items():
            # When index is given as a column name, skip it as a data series.
            if isinstance(self.index, string_types) and col == self.index:
                continue
            # save every new group we find
            self._groups.append(col)
            self.set_and_get("y_", col, values)
    def _set_sources(self):
        """
        Push the Line data into the ColumnDataSource and calculate the
        proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        self.x_range = DataRange1d()
        # _attr[0] is the shared "x" column; the rest are y_* series.
        y_names = self._attr[1:]
        endy = max(max(self._data[i]) for i in y_names)
        starty = min(min(self._data[i]) for i in y_names)
        # Pad the y-range by 10% of the data span on each side.
        self.y_range = Range1d(
            start=starty - 0.1 * (endy - starty),
            end=endy + 0.1 * (endy - starty)
        )
    def _yield_renderers(self):
        """Use the line glyphs to connect the xy points in the Line.
        Takes reference points from the data loaded at the ColumnDataSource.
        """
        colors = cycle_colors(self._attr, self.palette)
        # Start at 1 to skip the "x" attribute; colors are offset by one.
        for i, duplet in enumerate(self._attr[1:], start=1):
            glyph = LineGlyph(x='x', y=duplet, line_color=colors[i - 1])
            renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
            self._legends.append((self._groups[i-1], [renderer]))
            yield renderer
| bsd-3-clause |
StepicOrg/Stepic-API | examples/external-reports/library/models.py | 1 | 24967 | import argparse
import shutil
import os
import time
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from library.api import API_HOST, get_token, fetch_objects, fetch_objects_by_pk
from library.settings import ITEM_FORMAT, OPTION_FORMAT, STEP_FORMAT, STEP_STAT_FORMAT
from library.utils import (html2latex, create_answer_matrix, get_course_structure,
get_course_submissions, get_step_options, get_step_info,
get_item_statistics, get_question, process_step_url, process_options_with_name,
get_video_peaks, get_video_stats, get_unix_date, get_course_grades)
class ExternalCourseReport:
    """Build a PDF report for a Stepik course from a LaTeX project template.

    :meth:`build` parses command-line options, prepares the per-course copy
    of the LaTeX project folder and compiles it into
    ``pdf/course-<id>-report.pdf``. Subclasses override
    :meth:`generate_latex_report` to write the generated .tex fragments.
    """
    default_project_folder = 'default'
    default_report_name = 'course-report'
    course_project_folder = 'course-{}'
    course_report_name = 'course-{}-report'
    def __init__(self):
        self.course_id = 0
        # Unix timestamp; data collected before this moment is ignored.
        # Initialized here so the attribute always exists, even when a
        # subclass method is called without going through build().
        self.from_date = 0
    def get_parser(self):
        """Return the argparse parser for the report's CLI options."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--course', type=int, help='course id')
        parser.add_argument('-n', '--nocache', action='store_true', help='no cache when generate latex report')
        parser.add_argument('-u', '--update', action='store_true', help='update project when compile latex report')
        parser.add_argument('-d', '--date', type=str,
                            help='date when data collection started, in the format YYYY-MM-DD ')
        return parser
    def build(self):
        """Parse CLI arguments, prepare the LaTeX project folder and compile."""
        args = self.get_parser().parse_args()
        if args.course:
            self.course_id = args.course
        cached = not bool(args.nocache)
        update = bool(args.update)
        date = args.date if args.date else '1970-01-01'
        try:
            # Strip an optional '+HH:MM' timezone suffix before parsing.
            timestamp = time.mktime(datetime.datetime.strptime(date.split('+')[0], '%Y-%m-%d').timetuple())
            self.from_date = int(timestamp)
        except ValueError:
            # Bug fix: the message previously said "data {}" — it reports an
            # invalid --date value, so it must read "date {}".
            print('date {} does not match format YYYY-MM-DD, default 1970-01-01'.format(date))
            self.from_date = 0
        print('Course {} processing...'.format(self.course_id))
        base = 'latex/'
        directory_name = self.course_project_folder.format(self.course_id)
        full_directory = base + directory_name
        # --update: start over from a pristine copy of the default template.
        if update and os.path.exists(full_directory):
            shutil.rmtree(full_directory)
        if not os.path.exists(full_directory):
            shutil.copytree(base + self.default_project_folder, full_directory)
        self.generate_latex_report(full_directory + '/generated/', cached=cached)
        self.compile_latex_report(full_directory)
    def generate_latex_report(self, directory, cached=True):
        """Hook for subclasses: write generated .tex files into `directory`."""
        pass
    def compile_latex_report(self, directory):
        """Run pdflatex (three passes to settle cross-references) and copy
        the resulting PDF into the top-level pdf/ folder.

        NOTE: os.chdir permanently changes the process working directory.
        """
        latex_command = 'pdflatex -synctex=1 -interaction=nonstopmode {}.tex'.format(self.default_report_name)
        # Launch LaTeX three times
        os.chdir(directory)
        os.system(latex_command)
        os.system(latex_command)
        os.system(latex_command)
        report_name = self.course_report_name.format(self.course_id)
        shutil.copy('{}.pdf'.format(self.default_report_name),
                    '../../pdf/{}.pdf'.format(report_name))
class ItemReport(ExternalCourseReport):
    """Per-item (step) psychometric report: item statistics and option analysis."""
    default_project_folder = 'default-item'
    default_report_name = 'course-item-report'
    course_project_folder = 'course-{}-item'
    course_report_name = 'course-{}-item-report'
    def generate_latex_report(self, directory, cached=True):
        """Fetch course structure and submissions (cached as CSV under cache/),
        run item/option analyses and write the .tex fragments into *directory*."""
        course_id = self.course_id
        course_info = fetch_objects('courses', pk=course_id)
        course_title = course_info[0]['title']
        course_url = '{}/course/{}'.format(API_HOST, course_id)
        with open('{}info.tex'.format(directory), 'w', encoding='utf-8') as info_file:
            info_file.write('\\def\\coursetitle{{{}}}\n\\def\\courseurl{{{}}}\n'.format(course_title, course_url))
        course_structure_filename = 'cache/course-{}-structure.csv'.format(course_id)
        if os.path.isfile(course_structure_filename) and cached:
            course_structure = pd.read_csv(course_structure_filename)
        else:
            course_structure = get_course_structure(course_id)
            course_structure.to_csv(course_structure_filename, index=False)
        # 1-based ordinal of a step among steps sharing (lesson, position)
        course_structure['step_variation'] = course_structure.groupby(['lesson_id', 'step_position']).cumcount()
        course_structure['step_variation'] += 1
        submissions_filename = 'cache/course-{}-submissions.csv'.format(course_id)
        if os.path.isfile(submissions_filename) and cached:
            submissions = pd.read_csv(submissions_filename)
        else:
            submissions = get_course_submissions(course_id, course_structure)
            submissions.to_csv(submissions_filename, index=False)
        # Keep only submissions after the -d/--date cutoff set in build()
        submissions = submissions[submissions.submission_time >= self.from_date]
        submissions = pd.merge(submissions, course_structure, on='step_id')
        item_statistics = self.perform_item_analysis(submissions, course_structure, cached)
        option_statistics = self.perform_option_analysis(submissions, cached)
        self.generate_latex_files(item_statistics, option_statistics, directory)
    def perform_item_analysis(self, submissions, course_structure, cached=True):
        """Compute (or load cached) per-step item statistics.

        Returns a DataFrame sorted in course order, or None when no statistics
        could be computed (empty answer matrix).
        """
        # item statistics
        course_id = self.course_id
        item_statistics_filename = 'cache/course-{}-item-statistics.csv'.format(course_id)
        if os.path.isfile(item_statistics_filename) and cached:
            item_statistics = pd.read_csv(item_statistics_filename)
        else:
            # A user "answers" a step correctly iff no submission of theirs is 'wrong'
            answers = create_answer_matrix(submissions, 'user_id', 'step_id', 'status',
                                           lambda x: int('wrong' not in x.tolist()), 'submission_time')
            item_statistics = get_item_statistics(answers)
            if item_statistics.empty:
                return
            item_statistics = item_statistics.rename(columns={'item': 'step_id'})
            item_statistics = pd.merge(item_statistics, course_structure, on='step_id')
            item_statistics['step_url'] = item_statistics.apply(process_step_url, axis=1)
            item_statistics['question'] = item_statistics['step_id'].apply(get_question)
            item_statistics = item_statistics.sort_values(['module_position', 'lesson_position',
                                                           'step_position', 'step_id'])
            item_statistics.to_csv(item_statistics_filename, index=False)
        return item_statistics
    def perform_option_analysis(self, submissions, cached=True, with_distractors=False):
        """Compute (or load cached) per-option statistics for 'choice' steps.

        With *with_distractors* the per-option difficulty/discrimination is
        computed from an option-level answer matrix; otherwise only option
        names are collected.
        """
        course_id = self.course_id
        option_statistics_filename = 'cache/course-{}-option-statistics.csv'.format(course_id)
        if os.path.isfile(option_statistics_filename) and cached:
            option_statistics = pd.read_csv(option_statistics_filename)
        else:
            if with_distractors:
                option_statistics = pd.DataFrame(columns=['step_id', 'is_multiple',
                                                          'is_correct', 'option_id', 'option_name',
                                                          'clue', 'difficulty', 'discrimination'])
            else:
                option_statistics = pd.DataFrame(columns=['step_id', 'is_multiple',
                                                          'is_correct', 'option_id', 'option_name'])
            option_statistics[['step_id', 'option_id']] = option_statistics[['step_id', 'option_id']].astype(int)
            for step_id in submissions.step_id.unique():
                step_submissions = submissions[submissions.step_id == step_id]
                if step_submissions.empty:
                    continue
                # Option analysis only makes sense for multiple-choice steps
                if step_submissions.step_type.tolist()[0] != 'choice':
                    continue
                print('Option analysis for step_id = ', step_id)
                option_names = get_step_options(step_id)
                if with_distractors:
                    step_options = pd.DataFrame(columns=['user_id', 'is_multiple', 'option_id', 'answer', 'clue'])
                    for _, row in step_submissions.iterrows():
                        try:
                            options = process_options_with_name(row['dataset'], row['reply'], option_names=option_names)
                        except(ValueError, KeyError):
                            # cannot process submission
                            continue
                        options['user_id'] = row['user_id']
                        step_options = step_options.append(options)
                    # A user "answers" an option correctly iff none of their picks for it is False
                    step_answers = create_answer_matrix(step_options, 'user_id', 'option_id', 'answer',
                                                        lambda x: int(False not in x.tolist()))
                    step_statistics = get_item_statistics(step_answers)
                    step_statistics = step_statistics.rename(columns={'item': 'option_id',
                                                                     'item_total_corr': 'option_item_corr'})
                    step_statistics = pd.merge(step_statistics,
                                               step_options.loc[:, ['option_id', 'clue']].drop_duplicates(),
                                               on='option_id')
                    step_statistics = pd.merge(step_statistics, option_names, on='option_id')
                    step_statistics[['is_correct']] = step_statistics[['clue']]
                else:
                    step_statistics = option_names
                option_statistics = option_statistics.append(step_statistics)
            option_statistics.to_csv('cache/course-{}-option-statistics.csv'.format(course_id), index=False)
        return option_statistics
    def generate_latex_files(self, item_statistics, option_statistics, directory):
        """Write map.tex plus one step-<id>.tex fragment per item into *directory*."""
        # TODO: Improve recommendations based on item and option statistics
        def get_recommendation(question, options):
            # Build a human-readable (Russian) recommendation string from the
            # item's difficulty / discrimination / item-total correlation.
            recommendation = ''
            difficulty = question.difficulty
            discrimination = question.discrimination
            item_total_corr = question.item_total_corr
            if options.empty or ('difficulty' not in options.columns.values):
                n_nonfunct_options = 0
            else:
                # Non-functional: almost never chosen (difficulty < 0.05) and not a correct option
                nonfunct_options = options[(options.difficulty < 0.05) & ~options.is_correct]
                n_nonfunct_options = nonfunct_options.shape[0]
            if difficulty < 0.05:
                recommendation += 'Задание слишком легкое, его лучше пересмотреть или удалить.\n\n'
            elif difficulty > 0.95:
                recommendation += 'Задание слишком сложное: проверьте, правильно ли оно составлено.\n\n'
            if (discrimination <= 0) or (item_total_corr <= 0):
                recommendation += 'У задания отрицательная дискриминативность: ' + \
                                  'если оно корректно составлено, то его лучше исключить.\n\n'
            if (0 < discrimination) and (discrimination < 0.3):
                recommendation += 'У задания низкая дискриминативность: ' + \
                                  'его можно оставить в качесте тренировочного задания, ' + \
                                  'но для проверки знаний его лучше не использовать.\n\n'
            if (0 < item_total_corr) and (item_total_corr < 0.2):
                recommendation += 'Данное задание слабо коррелирует с суммарным баллом: ' + \
                                  'возможно, оно проверяет не то же самое, что проверяют другие задания.\n\n'
            if n_nonfunct_options:
                recommendation += 'Данное задание содержит нефункциональные опции: их нужно заменить или удалить.\n\n'
            return recommendation
        # Truncate map.tex, then append one \input per generated step file
        with open('{}map.tex'.format(directory), 'w', encoding='utf-8') as map_file:
            map_file.write('')
        for ind, item in item_statistics.iterrows():
            with open('{}map.tex'.format(directory), 'a', encoding='utf-8') as map_file:
                map_file.write('\\input{{generated/step-{}.tex}}\n'.format(item.step_id))
            with open('{}step-{}.tex'.format(directory, item.step_id), 'w', encoding='utf-8') as item_file:
                item_file.write(ITEM_FORMAT.format(item=item))
                if item.step_type != 'choice':
                    continue
                has_difficulty = bool('difficulty' in option_statistics.columns.values)
                if has_difficulty:
                    item_file.write('\n\n\\begin{options}\n')
                item_option_statistics = option_statistics[option_statistics.step_id == item.step_id]
                for op_ind, option in item_option_statistics.iterrows():
                    label = ''
                    if option.is_multiple:
                        label += '\multiple'
                    else:
                        label += '\single'
                    if option.is_correct:
                        label += 'correct'
                    else:
                        label += 'wrong'
                    option.option_name = html2latex(option.option_name)
                    if not has_difficulty:
                        difficulty = ''
                    else:
                        difficulty = '{:.2f}'.format(option.difficulty)
                    item_file.write(OPTION_FORMAT.format(label=label, name=option.option_name,
                                                         difficulty=difficulty))
                if has_difficulty:
                    item_file.write('\\end{options}\n\n')
                item_recommendation = get_recommendation(item, item_option_statistics)
                if item_recommendation:
                    item_file.write('\\begin{recommendations}\n')
                    item_file.write(item_recommendation)
                    item_file.write('\\end{recommendations}\n')
                else:
                    # Keep the environment commented out so the template structure stays visible
                    item_file.write('%\\begin{recommendations}\n\n%\\end{recommendations}\n')
class VideoReport(ExternalCourseReport):
    """Report on viewing-peaks in course videos (possible comprehension hot spots)."""
    default_project_folder = 'default-video'
    default_report_name = 'course-video-report'
    course_project_folder = 'course-{}-video'
    course_report_name = 'course-{}-video-report'
    def generate_latex_report(self, directory, cached=True):
        """Detect viewing peaks for every video step, save plots and write the
        per-step .tex fragments plus a top-5 summary into *directory*."""
        course_id = self.course_id
        token = get_token()
        course_structure = get_course_structure(course_id)
        # Only video steps are relevant for this report
        course_structure = course_structure.loc[course_structure.step_type == 'video',
                                                ['step_id', 'step_position', 'lesson_id']]
        course_info = fetch_objects('courses', pk=course_id)
        course_title = course_info[0]['title']
        course_url = '{}/course/{}'.format(API_HOST, course_id)
        with open('{}info.tex'.format(directory), 'w', encoding='utf-8') as info_file:
            info_file.write('\\def\\coursetitle{{{}}}\n\\def\\courseurl{{{}}}\n'.format(course_title, course_url))
        # Truncate map.tex; generate_latex_files() appends to it per step
        with open('{}map.tex'.format(directory), 'w', encoding='utf-8') as map_file:
            map_file.write('')
        total_peaks = pd.DataFrame()
        for ind, row in course_structure.iterrows():
            step_id = row.step_id
            step_url = 'https://stepik.org/lesson/{}/step/{}'.format(row.lesson_id, row.step_position)
            stats = get_video_stats(step_id, cached, token)
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            ax2 = fig.add_subplot(212)
            windows = get_video_peaks(stats, plot=True, ax=ax1, ax2=ax2)
            windows['step_id'] = step_id
            windows['course_id'] = course_id
            windows['step_url'] = step_url
            # Human-readable MM:SS labels for peak boundaries
            windows['start_sec'] = windows['start'].apply(lambda x: '{:02d}:{:02d}'.format(x // 60, x % 60))
            windows['end_sec'] = windows['end'].apply(lambda x: '{:02d}:{:02d}'.format(x // 60, x % 60))
            self.generate_latex_files(course_id, step_id, step_url, windows, directory)
            fig.savefig('{}step_{}.png'.format(directory, step_id))
            plt.close()
            if total_peaks.empty:
                total_peaks = windows
            else:
                total_peaks = total_peaks.append(windows)
        total_peaks.to_csv('cache/course-{}-totalpeaks.csv'.format(course_id), index=False)
        # total_peaks = pd.read_csv('cache/course-{}-totalpeaks.csv'.format(course_id))
        # Summary: the (up to) five peaks with the largest relative area
        total_peaks = total_peaks.sort_values('area', ascending=False)
        if total_peaks.shape[0] <= 5:
            top_peaks = total_peaks
        else:
            top_peaks = total_peaks[0:5]
        with open('{}total.tex'.format(directory), 'w', encoding='utf-8') as total_file:
            if not total_peaks.empty:
                total_file.write('В курсе выделены следующие пики, имеющие максимальную относительную площадь.\n')
                total_file.write('Проверьте, нет ли в данных местах у учащихся ' +
                                 'трудностей с пониманием учебного материала.\n')
                total_file.write('\\begin{totalpeaks}\n')
                for ind, row in top_peaks.iterrows():
                    total_file.write('\\totalpeak{{{}}}{{{}}}{{{}}}{{{}}}{{{:.2f}}}\n'.format(row.step_id,
                                                                                             row.step_url,
                                                                                             row.start_sec,
                                                                                             row.end_sec,
                                                                                             row.area))
                total_file.write('\\end{totalpeaks}\n')
            else:
                total_file.write('\n')
    def generate_latex_files(self, course_id, step_id, step_url, windows, directory):
        """Append the step to map.tex and write its step_<id>.tex peak table."""
        with open('{}map.tex'.format(directory), 'a', encoding='utf-8') as map_file:
            map_file.write('\\input{{generated/step_{}.tex}}\n'.format(step_id))
        with open('{}step_{}.tex'.format(directory, step_id), 'w', encoding='utf-8') as step_file:
            step_file.write(STEP_FORMAT.format(step_id=step_id, step_url=step_url))
            if windows.empty:
                step_file.write('\n\nПики не обнаружены.\n')
            else:
                step_file.write('\n\n\\begin{peaks}\n')
                for ind, row in windows.iterrows():
                    step_file.write('\peak{{{}}}{{{}}}{{{:.2f}}}{{{:.2f}}}{{{:.2f}}}\n'.format(row.start_sec,
                                                                                              row.end_sec,
                                                                                              row.width,
                                                                                              row.height,
                                                                                              row.area))
                step_file.write('\\end{peaks}\n\n')
class DropoutReport(ExternalCourseReport):
    """Per-step dropout report for learners who scored below the certificate threshold."""
    default_project_folder = 'default-dropout'
    default_report_name = 'course-dropout-report'
    course_project_folder = 'course-{}-dropout'
    course_report_name = 'course-{}-dropout-report'

    def generate_latex_report(self, directory, cached=True):
        """Collect grades and submissions, compute per-step dropout/completion
        rates and write per-lesson .tex tables into *directory*.

        Dropout rate of a step = share of (non-certified, non-staff) learners
        whose very last submission in the course happened at that step.
        """
        course_id = self.course_id
        token = get_token()
        course_info = fetch_objects('courses', pk=course_id)[0]
        course_title = course_info['title']
        course_url = '{}/course/{}'.format(API_HOST, course_id)
        with open('{}info.tex'.format(directory), 'w', encoding='utf-8') as info_file:
            info_file.write('\\def\\coursetitle{{{}}}\n\\def\\courseurl{{{}}}\n'.format(course_title, course_url))
        # Truncate map.tex; per-lesson files are appended to it below
        with open('{}map.tex'.format(directory), 'w', encoding='utf-8') as map_file:
            map_file.write('')
        time_now = time.time()
        certificate_threshold = course_info['certificate_regular_threshold']
        begin_date = get_unix_date(course_info['begin_date']) if course_info['begin_date'] else 0
        # Bug fix: the fallback must depend on 'last_deadline' itself; the
        # original tested course_info['begin_date'] here, so a course with a
        # begin date but no last deadline crashed on get_unix_date(None).
        last_deadline = get_unix_date(course_info['last_deadline']) if course_info['last_deadline'] else time_now
        # Exclude instructors and testers from the learner population
        course_teachers = course_info['instructors']
        course_testers = fetch_objects_by_pk('groups', course_info["testers_group"], token=token)[0]['users']
        users_to_delete = course_teachers + course_testers
        # collect course grades
        grades = get_course_grades(course_id, token=token)
        learners = grades[['user_id', 'total_score', 'date_joined', 'last_viewed']].drop_duplicates()
        learners = learners[~learners.user_id.isin(users_to_delete)]
        # Keep active learners who did NOT reach the certificate threshold
        learners = learners[(0 < learners.total_score) & (learners.total_score < certificate_threshold)]
        # collect submissions
        course_structure = get_course_structure(course_id, token=token)
        course_submissions = get_course_submissions(course_id, course_structure, token)
        course_submissions = course_submissions[course_submissions.user_id.isin(learners.user_id)]
        # find last submissions (restricted to the course's running period)
        course_submissions = course_submissions[(begin_date < course_submissions.submission_time) &
                                                (course_submissions.submission_time < last_deadline)]
        idx_grouped = course_submissions.groupby('user_id')['submission_time']
        idx = idx_grouped.transform(max) == course_submissions['submission_time']
        last_submissions = course_submissions[idx].groupby('step_id', as_index=False)['submission_id'].count()
        last_submissions = last_submissions.rename(columns={'submission_id': 'last_submissions'})
        unique_submissions = course_submissions.groupby('step_id', as_index=False)['user_id'].agg(pd.Series.nunique)
        unique_submissions = unique_submissions.rename(columns={'user_id': 'unique_submissions'})
        step_stats = unique_submissions.merge(last_submissions)
        step_stats['dropout_rate'] = step_stats.apply(lambda row: (row.last_submissions / row.unique_submissions
                                                                   if row.unique_submissions else 0), axis=1)
        step_stats = pd.merge(course_structure, step_stats, how='left')
        additional_columns = ['viewed_by', 'passed_by', 'correct_ratio']
        step_stats[additional_columns] = step_stats.step_id.apply(lambda step:
                                                                  get_step_info(step)[additional_columns])
        step_stats['difficulty'] = 1 - step_stats['correct_ratio']
        step_stats['completion_rate'] = step_stats.apply((lambda row: row.passed_by / row.viewed_by
                                                          if row.viewed_by else 0), axis=1)
        step_stats.to_csv('cache/course-{}-stepstats.csv'.format(course_id), index=False)
        step_stats['step_url'] = step_stats.apply(process_step_url, axis=1)
        # Express the rates as percentages rounded to one decimal for the report
        step_stats['completion_rate'] *= 100
        step_stats['dropout_rate'] *= 100
        step_stats['completion_rate'] = step_stats['completion_rate'].round(1)
        step_stats['dropout_rate'] = step_stats['dropout_rate'].round(1)
        for lesson_id in step_stats.lesson_id.unique():
            step_lesson_stats = step_stats[step_stats.lesson_id == lesson_id]
            step_lesson_stats = step_lesson_stats.fillna('')
            lesson_url = '{}/lesson/{}'.format(API_HOST, lesson_id)
            lesson_name = '{}'.format(lesson_id)  # TODO: use module and lesson position
            with open('{}map.tex'.format(directory), 'a', encoding='utf-8') as map_file:
                map_file.write('\\input{{generated/lesson-{}.tex}}\n'.format(lesson_id))
            with open('{}lesson-{}.tex'.format(directory, lesson_id), 'w', encoding='utf-8') as lesson_file:
                lesson_file.write('\\newpage\n\\lessoninfo{{{}}}{{{}}}\n'.format(lesson_name, lesson_url))
                lesson_file.write('\\begin{lessonstatistics}')
                for _, step_stat in step_lesson_stats.iterrows():
                    lesson_file.write(STEP_STAT_FORMAT.format(stat=step_stat))
                lesson_file.write('\\end{lessonstatistics}')
tomlof/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
# Candidate classifiers; the keys double as the valid choices for the
# -e/--estimators command-line option below.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(n_estimators=100,
                                            max_features="sqrt",
                                            min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(n_estimators=100,
                                        max_features="sqrt",
                                        min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
    # Benchmark the selected estimators on the vectorized 20 newsgroups data,
    # reporting train time, test time and accuracy for each.
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--estimators', nargs="+", required=True,
                        choices=ESTIMATORS)
    args = vars(parser.parse_args())

    data_train = fetch_20newsgroups_vectorized(subset="train")
    data_test = fetch_20newsgroups_vectorized(subset="test")
    X_train = check_array(data_train.data, dtype=np.float32,
                          accept_sparse="csc")
    X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
    y_train = data_train.target
    y_test = data_test.target

    print("20 newsgroups")
    print("=============")
    print("X_train.shape = {0}".format(X_train.shape))
    print("X_train.format = {0}".format(X_train.format))
    print("X_train.dtype = {0}".format(X_train.dtype))
    # np.prod replaces the deprecated np.product alias (removed in NumPy 2.0)
    print("X_train density = {0}"
          "".format(X_train.nnz / np.prod(X_train.shape)))
    print("y_train {0}".format(y_train.shape))
    print("X_test {0}".format(X_test.shape))
    print("X_test.format = {0}".format(X_test.format))
    print("X_test.dtype = {0}".format(X_test.dtype))
    print("y_test {0}".format(y_test.shape))
    print()
    print("Classifier Training")
    print("===================")
    accuracy, train_time, test_time = {}, {}, {}
    for name in sorted(args["estimators"]):
        clf = ESTIMATORS[name]
        try:
            # Fix the seed for reproducibility; some estimators have no
            # random_state parameter, hence the try/except.
            clf.set_params(random_state=0)
        except (TypeError, ValueError):
            pass
        print("Training %s ... " % name, end="")
        t0 = time()
        clf.fit(X_train, y_train)
        train_time[name] = time() - t0
        t0 = time()
        y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
        accuracy[name] = accuracy_score(y_test, y_pred)
        print("done")
    print()
    print("Classification performance:")
    print("===========================")
    print()
    print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
                           "Accuracy"))
    print("-" * 44)
    # Worst to best accuracy
    for name in sorted(accuracy, key=accuracy.get):
        print("%s %s %s %s" % (name.ljust(16),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % accuracy[name]).center(10)))
    print()
| bsd-3-clause |
sfepy/sfepy | sfepy/discrete/iga/plot_nurbs.py | 5 | 6344 | from __future__ import absolute_import
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.discrete.fem.geometry_element import GeometryElement
from sfepy.mesh.mesh_generators import get_tensor_product_conn
import sfepy.postprocess.plot_dofs as pd
from sfepy.postprocess.plot_dofs import _get_axes
from sfepy.discrete.iga.iga import _get_knots_tuple
from six.moves import range
def plot_parametric_mesh(ax, knots):
    """
    Plot the parametric mesh of a NURBS given by its knots.

    The unique knot values along each axis define a tensor-product grid
    whose vertices and edges are drawn.
    """
    knots = _get_knots_tuple(knots)
    dim = len(knots)
    ax = _get_axes(ax, dim)

    unique_knots = [nm.unique(kv) for kv in knots]
    grid_shape = [len(uk) for uk in unique_knots]

    # Vertex coordinates: cartesian product of the per-axis unique knots.
    index_grid = nm.mgrid[[slice(nn) for nn in grid_shape]]
    coors = nm.r_[[unique_knots[axis][ids].ravel()
                   for axis, ids in enumerate(index_grid)]].T

    conn, desc = get_tensor_product_conn(nm.array(grid_shape))
    gel = GeometryElement(desc)

    ax = pd.plot_mesh(ax, coors, conn, gel.edges)
    pd.plot_points(ax, coors)
    return ax
def plot_control_mesh(ax, control_points, label=False):
    """
    Plot the control mesh of a NURBS given by its control points.

    If `label` is set, each control point is annotated with its index.
    """
    dim = control_points.shape[-1]
    ax = _get_axes(ax, dim)

    grid_shape = control_points.shape[:-1]
    conn, desc = get_tensor_product_conn(nm.array(grid_shape))
    gel = GeometryElement(desc)

    coors = control_points.reshape((-1, dim))
    ax = pd.plot_mesh(ax, coors, conn, gel.edges)
    pd.plot_points(ax, coors)

    if label:
        for index, point in enumerate(coors):
            ax.text(*point, s='%d' % index,
                    color='g', fontsize=12, weight='bold')

    return ax
def _get_edges(n_ep, shape):
dim = len(shape)
aux = nm.arange(n_ep).reshape(shape)
edges = []
if dim == 3:
for ii in range(shape[2] - 1):
edges.append(aux[0, 0, ii:ii+2])
edges.append(aux[-1, 0, ii:ii+2])
edges.append(aux[0, -1, ii:ii+2])
edges.append(aux[-1, -1, ii:ii+2])
for ii in range(shape[1] - 1):
edges.append(aux[0, ii:ii+2, 0])
edges.append(aux[-1, ii:ii+2, 0])
edges.append(aux[0, ii:ii+2, -1])
edges.append(aux[-1, ii:ii+2, -1])
for ii in range(shape[0] - 1):
edges.append(aux[ii:ii+2, 0, 0])
edges.append(aux[ii:ii+2, -1, 0])
edges.append(aux[ii:ii+2, 0, -1])
edges.append(aux[ii:ii+2, -1, -1])
elif dim == 2:
for ii in range(shape[1] - 1):
edges.append(aux[0, ii:ii+2])
edges.append(aux[-1, ii:ii+2])
for ii in range(shape[0] - 1):
edges.append(aux[ii:ii+2, 0])
edges.append(aux[ii:ii+2, -1])
else:
for ii in range(shape[0] - 1):
edges.append(aux[ii:ii+2])
return nm.array(edges)
def plot_bezier_mesh(ax, control_points, conn, degrees, label=False):
    """
    Plot the Bezier mesh of a NURBS given by its control points and
    connectivity.

    If `label` is set, the global DOF numbers are drawn as well.
    """
    dim = control_points.shape[-1]
    ax = _get_axes(ax, dim)

    # Each element has (degree + 1) points per axis.
    points_per_axis = nm.asarray(degrees) + 1
    edges = _get_edges(conn.shape[1], points_per_axis)

    ax = pd.plot_mesh(ax, control_points, conn, edges)
    pd.plot_points(ax, control_points)
    if label:
        ax = pd.plot_global_dofs(ax, control_points, conn)
    return ax
def plot_iso_lines(ax, nurbs, color='b', n_points=100):
    """
    Plot the NURBS object using iso-lines in Greville abscissae coordinates.

    For each parametric axis, one iso-line is drawn per Greville abscissa of
    the other axis/axes, each sampled with `n_points` points.
    """
    dim = nurbs.dim
    ax = _get_axes(ax, dim)

    gas = nurbs.greville()
    if dim == 1:
        ga = gas[0]

        x0 = nm.linspace(ga[0], ga[-1], n_points)
        vals = nurbs(x0)
        if vals.shape[1] == 1:
            ax.plot(x0, vals[:, 0], color)

        else: # Assume curve in 2D.
            ax.plot(vals[:, 0], vals[:, 1], color)

    elif dim == 2:
        ga0 = gas[0]
        ga1 = gas[1]

        x1 = nm.linspace(ga1[0], ga1[-1], n_points)
        for x0 in ga0:
            vals = nurbs(x0, x1)
            ax.plot(vals[:, 0], vals[:, 1], color)

        x0 = nm.linspace(ga0[0], ga0[-1], n_points)
        # Bug fix: iterate the Greville abscissae of the *second* axis here
        # (the original looped over ga0 again), so that the second family of
        # iso-lines is drawn at fixed x1 values.
        for x1 in ga1:
            vals = nurbs(x0, x1)
            ax.plot(vals[:, 0], vals[:, 1], color)

    else:
        ga0 = gas[0]
        ga1 = gas[1]
        ga2 = gas[2]

        x2 = nm.linspace(ga2[0], ga2[-1], n_points)
        for x0 in ga0:
            for x1 in ga1:
                vals = nurbs(x0, x1, x2)
                ax.plot(vals[:, 0], vals[:, 1], vals[:, 2], color)

        x1 = nm.linspace(ga1[0], ga1[-1], n_points)
        for x0 in ga0:
            for x2 in ga2:
                vals = nurbs(x0, x1, x2)
                ax.plot(vals[:, 0], vals[:, 1], vals[:, 2], color)

        x0 = nm.linspace(ga0[0], ga0[-1], n_points)
        for x1 in ga1:
            for x2 in ga2:
                vals = nurbs(x0, x1, x2)
                ax.plot(vals[:, 0], vals[:, 1], vals[:, 2], color)

    return ax
def plot_nurbs_basis_1d(ax, nurbs, n_points=100, x_axis='parametric',
                        legend=False):
    """
    Plot a 1D NURBS basis.

    Each basis function is obtained by evaluating a unit "field" (all zeros
    except a single one) on `n_points` samples of the parametric interval.
    """
    ax = _get_axes(ax, 2)

    abscissae = nurbs.greville()[0]
    n_fun = nurbs.weights.shape[0]
    line = nm.linspace(abscissae[0], abscissae[-1], n_points)

    parametric = (x_axis == 'parametric')
    for ifun in range(n_fun):
        field = nm.zeros(n_fun)
        field[ifun] = 1.0
        vals = nurbs.evaluate(fields=field, u=line)
        if parametric:
            xs = line
        else:
            # Use the given physical coordinate component on the x axis.
            xs = nurbs(u=line)[:, x_axis]
        ax.plot(xs, vals, label='%d' % ifun)

    if legend:
        ax.legend()

    return ax
def plot_bezier_nurbs_basis_1d(ax, control_points, weights, degrees, cs, conn,
                               n_points=20):
    """
    Plot a 1D NURBS basis using the Bezier extraction and local Bernstein
    basis.

    Each basis function is drawn by evaluating a unit "variable" (all zeros
    except a single one) in `n_points` quadrature-like points per element.
    """
    from sfepy.discrete.iga.iga import eval_variable_in_qp

    ax = _get_axes(ax, 2)
    n_fun = weights.shape[0]
    line = nm.linspace(0, 1, n_points)[:, None]
    for ii in range(n_fun):
        variable = nm.zeros((n_fun, 1))
        variable[ii] = 1.0
        coors, vals, dets = eval_variable_in_qp(variable, line,
                                                control_points,
                                                weights, degrees,
                                                cs, conn)
        # Bug fix: draw on the axes that were prepared/returned by this
        # function; the original called plt.plot(), which ignored `ax` and
        # targeted whatever the current pyplot axes happened to be.
        ax.plot(coors[:, 0], vals)
    return ax
| bsd-3-clause |
diana-hep/carl | carl/distributions/transforms.py | 1 | 1770 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
from sklearn.utils import check_random_state
from . import TheanoDistribution
class LinearTransform(TheanoDistribution):
    """Apply a linear transformation `u = A*x` to `x ~ p`."""
    def __init__(self, p, A):
        """Constructor.
        Parameters
        ----------
        * `p` [`DistributionMixin`]:
            The base distribution.
        * `A` [array, shape=(p.ndim, p.ndim)]:
            The linear operator.
        """
        super(LinearTransform, self).__init__()
        self.p = p
        self.A = A
        # Cache A^-1 once; pdf/nll map transformed samples back to p's space.
        self.inv_A = np.linalg.inv(A)
        # Expose the base distribution's Theano variables as our own so that
        # fitting/inference machinery sees them.
        if isinstance(p, TheanoDistribution):
            for p_i in p.parameters_:
                self.parameters_.add(p_i)
            for c_i in p.constants_:
                self.constants_.add(c_i)
            for o_i in p.observeds_:
                self.observeds_.add(o_i)
        # Derive and overide pdf, nll and cdf analytically if possible
        # XXX todo
    def pdf(self, X, **kwargs):
        """Density of the transformed variable, evaluated via x = A^-1 u.

        NOTE(review): no Jacobian factor |det A|^-1 is applied here — verify
        whether callers rely on normalized densities.
        """
        return self.p.pdf(np.dot(self.inv_A, X.T).T, **kwargs)
    def nll(self, X, **kwargs):
        """Negative log-likelihood of the transformed variable."""
        return self.p.nll(np.dot(self.inv_A, X.T).T, **kwargs)
    def ppf(self, X, **kwargs):
        """Not supported."""
        raise NotImplementedError
    def cdf(self, X, **kwargs):
        """Not supported."""
        raise NotImplementedError
    def rvs(self, n_samples, random_state=None, **kwargs):
        """Draw `n_samples` from p and map them through A."""
        rng = check_random_state(random_state)
        out = self.p.rvs(n_samples, random_state=rng, **kwargs)
        return np.dot(self.A, out.T).T
    @property
    def ndim(self):
        # Dimensionality is inherited from the base distribution.
        return self.p.ndim
| bsd-3-clause |
jlegendary/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit GMMs over a grid of (covariance type, number of components) and keep
# the model with the lowest BIC.
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    # One group of bars per covariance type, shifted around the tick
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Star marker above the bar of the best (lowest-BIC) model
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
dankessler/cogfusion | cogfusion/farmcogatlas.py | 1 | 3055 | #!/usr/bin/python
"""
Based on an example from https://github.com/CognitiveAtlas/cogat-python
by Vanessa Sochat
"""
try:
    from cognitiveatlas.api import get_concept, get_task
except ImportError:
    # Narrowed from a bare `except:`: only a missing package should be
    # tolerated here; other errors (including KeyboardInterrupt) propagate.
    pass
# EXAMPLE 1: #########################################################
# We are going to retrieve all cognitive paradigms (tasks), find
# associated contrasts, and then find the cognitive concepts
# the contrasts are asserted to measure. You can choose to do these
# calls and arrange the data in whatever format fits your needs
######################################################################
# Step 1: Retrieve all tasks. When we do a general call for all tasks, we
# actually only retrieve the basic information:
#
# [u'def_id_user',
# u'id',
# u'definition_text',
# u'event_stamp',
# u'def_event_stamp',
# u'concept_class',
# u'def_id',
# u'id_concept_class',
# u'id_user',
# u'name',
# u'type',
# u'alias']
#
# We will need a second call to get the rest, the addition of:
#[u'conclass',
# u'implementations',
# u'disorders',
# u'discussion',
# u'indicators',
# u'conditions',
# u'contrasts',
# u'external_datasets',
# u'umarkdef',
# u'umark',
# u'history']
# Step 2: Find contrasts associated with each task
# Note that this is an inefficient way to retrieve the full data, but it will work!
if __name__ == '__main__':
    # Local imports: pandas and pickle are used below but were never imported
    # at module level, so the original script crashed with NameError at the end.
    import pandas
    import pickle

    # Step 1/2: ids of all tasks, then full task records to collect contrasts.
    task_uids = [task["id"] for task in get_task().json]
    contrasts = dict()  # contrast lookup by task uid
    # Now we can retrieve the full data. We are interested in contrasts, so let's save those.
    for t, task in enumerate(task_uids):
        if task not in contrasts:
            print('Task {} of {}'.format(t, len(task_uids)))
            task_complete = get_task(task).json[0]
            # Only save if we have contrasts
            if len(task_complete["contrasts"]) > 0:
                contrasts[task] = task_complete["contrasts"]
    # How many tasks have contrasts?
    print('# contrasts: {}'.format(len(contrasts)))
    # 437
    # Step 3: Make a contrast --> concept lookup
    concepts = dict()
    for t, taskContrasts in enumerate(contrasts.items()):
        task_uid, contrast_set = taskContrasts
        print('Task {} of {}'.format(t, len(task_uids)))
        for contrast in contrast_set:
            contrast_uid = contrast["id"]
            if contrast_uid not in concepts:
                try:  # Some calls don't work
                    concepts[contrast_uid] = get_concept(contrast_id=contrast_uid).json
                except Exception:
                    # Narrowed from a bare except: stay best-effort for API
                    # failures but let KeyboardInterrupt/SystemExit through.
                    pass
    # How many concepts are asserted to measure different contrasts?
    print('# concepts: {}'.format(len(concepts)))
    # Flatten the per-task contrast lists and dump everything to disk.
    allcontrasts = []
    for tid, taskContrasts in contrasts.items():
        for contrast in taskContrasts:
            allcontrasts.append(contrast)
    get_concept().pandas.to_csv('data/concepts.csv', encoding='utf-8')
    pandas.DataFrame(allcontrasts).to_csv('data/contrasts.csv', encoding='utf-8')
    with open('data/conceptsByContrasts.pickle', 'wb') as pickle_file:
        pickle.dump(concepts, pickle_file)
| mit |
viveksck/langchangetrack | langchangetrack/tsconstruction/displacements.py | 1 | 8624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import os
from os import path
import cPickle as pickle
import numpy as np
import scipy
import itertools
from scipy.spatial.distance import cosine, euclidean, norm
import pandas as pd
import more_itertools
from joblib import Parallel, delayed
from langchangetrack.utils.dummy_regressor import DummyRegressor
import gensim
import logging
# Module-level logging configuration for the package logger.
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"
logger = logging.getLogger("langchangetrack")
import psutil
from multiprocessing import cpu_count
# Allow this process to run on every available CPU core (psutil 2.x API;
# newer psutil renamed this to cpu_affinity()).
p = psutil.Process(os.getpid())
p.set_cpu_affinity(list(range(cpu_count())))
def normalize_vector(vec):
    """Return *vec* scaled to unit L2 length.

    Note: produces NaN/inf entries when *vec* is the zero vector, because
    we divide by a zero norm.
    """
    # BUGFIX (shadowing): the local was previously called `norm`, hiding the
    # `norm` function imported from scipy.spatial.distance at module scope.
    magnitude = (vec ** 2).sum() ** 0.5
    return (vec / magnitude)
def pairwise(iterable):
    """Yield consecutive overlapping pairs: [a, b, c, d] => (a, b), (b, c), (c, d)."""
    left, right = itertools.tee(iterable)
    # Advance the second stream one step so the two iterators are offset by one.
    next(right, None)
    return itertools.izip(left, right)
def process_word_source(w, eobj):
    """Module-level helper (picklable by joblib): displacements of word *w*
    over the source timepoint tuples of Displacements instance *eobj*."""
    source_index = 0
    return eobj.process_word(w, source_index)
def process_word_dest(w, eobj):
    """Module-level helper (picklable by joblib): displacements of word *w*
    over the destination timepoint tuples of Displacements instance *eobj*."""
    dest_index = 1
    return eobj.process_word(w, dest_index)
def process_chunk(chunk, func, *args):
    """Apply *func* to each element of *chunk* and return the list of results.

    Extra positional *args* are forwarded to every call. Progress is printed
    every 10 elements.
    """
    results = []
    for i, element in enumerate(chunk):
        results.append(func(element, *args))
        if i % 10 == 0:
            # BUGFIX (portability): was the Python-2-only statement
            # `print "Processing chunk", i`. A single pre-formatted string
            # prints the identical text under both Python 2 and Python 3.
            print("Processing chunk %d" % i)
    return results
class Displacements(object):
    """Base class for building word-displacement time series.

    Concrete subclasses plug in an embedding backend by implementing the
    virtual methods at the bottom (load_models_and_predictors, is_present,
    get_vector, calculate_distance, number_distance_metrics).
    """

    def __init__(self):
        """ Constructor """
        self.get_vectors = None
        self.load_model = None
        self.models = {}
        self.has_predictors = False
        self.load_predictor = None
        self.predictors = {}
        # If True, embeddings are assumed (and asserted) to be unit length.
        self.norm_embedding = False
        self.words_file = None
        self.timepoints = None
        self.data_dir = None
        self.pred_dir = None
        # -1 means "use every word in words_file".
        self.num_words = -1
        # One of "polar", "win" or "fixed"; see get_timepoints_word().
        self.method = None
        self.win_size = -1
        self.fixed_point = -1
        self.embedding_suffix = None
        self.predictor_suffix = None

    def get_word_list(self):
        """ Returns a list of words for which time series needs to be generated.
        """
        # Use a context manager so the file handle is always closed.
        with open(self.words_file, 'r') as fh:
            words_list = fh.read().split('\n')
        # Drop the empty trailing entry produced by a final newline.
        if words_list[-1] == '':
            words_list = words_list[:-1]
        if self.num_words != -1:
            # BUGFIX: was `words_list[:num_words]`; the bare name `num_words`
            # does not exist and raised NameError -- the limit lives on self.
            return words_list[:self.num_words]
        else:
            return words_list

    def get_tuples(self, word, timepoint1, timepoint2):
        """ Return what time point pairs we must consider for the word. """
        return [(word, timepoint1, word, timepoint2)]

    def generate_displacement_word(self, word, timepoints):
        """Compute displacement rows for *word* across (old, new) timepoint pairs.

        Each row is [word1, t1, word2, t2, *distances]; rows are padded with
        NaNs when the word is absent at either timepoint.
        """
        L = []
        for ot, nt in timepoints:
            modelo = self.get_predictor(ot)
            modeln = self.get_predictor(nt)
            tuples = self.get_tuples(word, ot, nt)
            for tup in tuples:
                word1, timepoint1, word2, timepoint2 = tup
                if self.is_present(timepoint1, word1) and self.is_present(timepoint2, word2):
                    vec1 = self.get_vector(timepoint1, word1)
                    vec2 = self.get_vector(timepoint2, word2)
                    if self.norm_embedding:
                        assert(np.isclose(norm(vec1), 1.0))
                        assert(np.isclose(norm(vec2), 1.0))
                    # Map both embeddings into the joint space before comparing.
                    vec1_pred = modelo.predict(vec1)
                    vec2_pred = modeln.predict(vec2)
                    if self.norm_embedding:
                        vec1_pred = normalize_vector(vec1_pred)
                        vec2_pred = normalize_vector(vec2_pred)
                        assert(np.isclose(norm(vec1), 1.0))
                        assert(np.isclose(norm(vec2), 1.0))
                    d = self.calculate_distance(vec1_pred, vec2_pred)
                    assert(len(d) == self.number_distance_metrics())
                    L.append([word1, timepoint1, word2, timepoint2] + d)
                else:
                    # Word is not present in both time periods
                    L.append([word1, timepoint1, word2, timepoint2] + list(itertools.repeat(np.nan, self.number_distance_metrics())))
        return L

    def get_timepoints_word(self, w, timepoints):
        """ Get the list of timepoints to be considered for a word. """
        for i, t in enumerate(timepoints):
            if self.is_present(t, w):
                break
        # We have found the first instance of the word at this time point.
        timepoints_considered = timepoints[i:]
        # Create the tuples for calculating displacements based on strategy used.
        if self.method == "polar":
            timepoints1 = zip(timepoints_considered, list(itertools.repeat(timepoints_considered[0], len(timepoints_considered))))
            timepoints2 = zip(timepoints_considered, list(itertools.repeat(timepoints_considered[-1], len(timepoints_considered))))
        elif self.method == 'win':
            # BUGFIX: `win_size` was referenced as a bare name (NameError);
            # the window size is stored on self.
            timepoints1 = zip(timepoints_considered[self.win_size:], timepoints_considered[:-self.win_size])
            timepoints2 = zip(timepoints_considered[:-self.win_size], timepoints_considered[self.win_size:])
        elif self.method == 'fixed':
            # BUGFIX: `fixed_point` was referenced as a bare name (NameError);
            # the fixed timepoint is stored on self.
            timepoints1 = zip(timepoints_considered, list(itertools.repeat(self.fixed_point, len(timepoints_considered))))
            timepoints2 = zip(timepoints_considered, list(itertools.repeat(timepoints_considered[-1], len(timepoints_considered))))
        else:
            # Previously an unknown method fell through to an UnboundLocalError;
            # fail loudly with a clear message instead.
            raise ValueError("Unknown timepoint strategy: %r" % (self.method,))
        # Return the two lists of tuples.
        return timepoints1, timepoints2

    def process_word(self, w, index):
        """ Calculate displacements of the word at each timepoint tuple.
        index: 0 selects the source tuples, 1 the destination tuples.
        """
        t = self.get_timepoints_word(w, self.timepoints)
        return self.generate_displacement_word(w, t[index])

    def calculate_words_displacement(self, column_names, n_jobs=1):
        """ Calculate word displacements for each word in the Pandas data frame. """
        words = self.get_word_list()
        # Create chunks of the words to be processed.
        # BUGFIX: np.ceil returns a float and more_itertools.chunked requires
        # an integer chunk size.
        chunk_sz = int(np.ceil(len(words) / float(n_jobs)))
        chunks = list(more_itertools.chunked(words, chunk_sz))
        # Calculate the displacements
        chunksL = Parallel(n_jobs=n_jobs, verbose=20)(delayed(process_chunk)(chunk, process_word_source, self) for chunk in chunks)
        chunksH = Parallel(n_jobs=n_jobs, verbose=20)(delayed(process_chunk)(chunk, process_word_dest, self) for chunk in chunks)
        L = more_itertools.flatten(chunksL)
        H = more_itertools.flatten(chunksH)
        flattendL = [x for sublist in L for x in sublist]
        flattendH = [x for sublist in H for x in sublist]
        # Store the results in a nice pandas data frame
        dfo, dfn = self.create_data_frames(flattendL, flattendH, column_names)
        return flattendL, flattendH, dfo, dfn

    def create_data_frames(self, L, H, column_names):
        """ Store the displacement of each word for the pair of timepoints in a
        nice Pandas data frame. """
        dfo = pd.DataFrame()
        dfo = dfo.from_records(L, columns=column_names)
        # Forward-fill NaNs for the source frame ...
        dfo_clean = dfo.fillna(method='ffill')
        dfn = pd.DataFrame()
        dfn = dfn.from_records(H, columns=column_names)
        # ... and backward-fill for the destination frame.
        # NOTE(review): fillna(method=...) is deprecated in modern pandas
        # (use .ffill()/.bfill()); kept for compatibility with the pinned version.
        dfn_clean = dfn.fillna(method='bfill')
        return dfo_clean, dfn_clean

    def get_model(self, timepoint):
        """ Return the model corresponding to this timepoint. """
        return self.models[timepoint]

    def get_predictor(self, timepoint):
        """ Return the predictor corresponding to this timepoint. """
        return self.predictors[timepoint]

    def number_distance_metrics(self):
        """ The number of distance metrics evaluated by calculate_distance. """
        # BUGFIX (portability): was the Python-2-only statement form
        # `raise NotImplementedError, "..."`; the call form works on 2 and 3.
        raise NotImplementedError("Pure virtual function")

    def calculate_distance(self, vec1, vec2):
        """ Calculate distances between vector1 and vector2. """
        raise NotImplementedError("Pure virtual function")

    def load_models_and_predictors(self):
        """ Load the embedding models and predictors for every timepoint. """
        raise NotImplementedError("Pure virtual function")

    def is_present(self, timepoint, word):
        """ Check if the word is present in the vocabulary at this timepoint. """
        raise NotImplementedError("Pure virtual function")

    def get_vector(self, timepoint, word):
        """ Get the embedding for this word at the specified timepoint."""
        raise NotImplementedError("Pure virtual function")
| bsd-3-clause |
ankurankan/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 20 | 4491 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that caracterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
# Fix the RNG seed so the generated waveforms (and resulting plots) are reproducible.
np.random.seed(0)
# Generate waveform data
n_features = 2000
# Time base: n_features evenly spaced samples over [0, pi].
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
    """Square wave taking values in {-1, 0, 1}: the sign of cos(x)."""
    cosine = np.cos(x)
    return np.sign(cosine)
# Build 30 noisy observations of each of the three waveform classes.
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    for _ in range(30):
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse
        # Zero out everything but the largest-magnitude entries, so the l1
        # norm of the noise is much smaller than its l2 norm.
        additional_noise[np.abs(additional_noise) < .997] = 0
        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
# For each metric, show the matrix of mean inter-class distances (normalised
# by its maximum so the three figures share a scale).
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    for i in range(n_clusters):
        for j in range(n_clusters):
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    avg_dist /= avg_dist.max()
    for i in range(n_clusters):
        for j in range(n_clusters):
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')
    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results
# NOTE(review): the `affinity=` keyword was renamed to `metric=` in recent
# scikit-learn releases -- confirm the version this example is pinned to.
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/tests/test_delaunay.py | 14 | 7090 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.cbook import MatplotlibDeprecationWarning
with warnings.catch_warnings():
    # the module is deprecated. The tests should be removed when the module is.
    warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
    # NOTE(review): matplotlib.delaunay was removed from later matplotlib
    # releases (replaced by matplotlib.tri / scipy.spatial); this import only
    # resolves on old versions -- confirm the pinned matplotlib.
    from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
    """Flat test surface: one at every sample point, matching x's shape and dtype."""
    ones = np.ones(x.shape, x.dtype)
    return ones
constant.title = 'Constant'
def xramp(x, y):
    """Plane rising linearly with x; the y input is ignored."""
    ramp = x
    return ramp
xramp.title = 'X Ramp'
def yramp(x, y):
    """Plane rising linearly with y; the x input is ignored."""
    ramp = y
    return ramp
yramp.title = 'Y Ramp'
def exponential(x, y):
    """Franke-style surface: three Gaussian bumps minus one dip over [0, 1]^2."""
    # Rescale the unit square to [0, 9]^2, where the classic constants live.
    x = x * 9
    y = y * 9
    x1 = x + 1.0
    x2 = x - 2.0
    x4 = x - 4.0
    x7 = x - 7.0
    # NOTE(review): Franke's original F2 term uses (9y + 1), i.e. y + 1.0 here,
    # but this module has always computed x + 1.0; the baseline images were
    # generated from this exact formula, so it is preserved unchanged.
    y1 = x + 1.0
    y2 = y - 2.0
    y3 = y - 3.0
    y7 = y - 7.0
    return (0.75 * np.exp(-(x2 * x2 + y2 * y2) / 4.0) +
            0.75 * np.exp(-x1 * x1 / 49.0 - y1 / 10.0) +
            0.5 * np.exp(-(x7 * x7 + y3 * y3) / 4.0) -
            0.2 * np.exp(-x4 * x4 - y7 * y7))
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
    """Sharp step along the diagonal y = x, smoothed by a tanh profile."""
    step = np.tanh(9.0 * (y - x) + 1.0)
    return step / 9.0
cliff.title = 'Cliff'
def saddle(x, y):
    """Saddle-shaped surface: a cosine ridge in y over a quadratic valley in x."""
    numerator = 1.25 + np.cos(5.4 * y)
    denominator = 6.0 + 6.0 * (3 * x - 1.0) ** 2
    return numerator / denominator
saddle.title = 'Saddle'
def gentle(x, y):
    """Low, broad Gaussian bump centred on (0.5, 0.5), peak height 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-5.0625 * r2) / 3.0
gentle.title = 'Gentle Peak'
def steep(x, y):
    """Narrow Gaussian spike centred on (0.5, 0.5), peak height 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-20.25 * r2) / 3.0
steep.title = 'Steep Peak'
def sphere(x, y):
    """Upper hemisphere (radius 8/9) centred on (0.5, 0.5); zero outside it."""
    circle = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
    inside = circle >= 0
    dome = np.sqrt(np.clip(circle, 0, 100)) - 0.5
    return np.where(inside, dome, 0.0)
sphere.title = 'Sphere'
def trig(x, y):
    """Product-of-sinusoids surface with an additional x*y cross term."""
    separable = 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y)
    cross = np.sin(10.0 * x * y)
    return separable + cross
trig.title = 'Cosines and Sines'
def gauss(x, y):
    """Gaussian peak at (0.5, 0.5) plus a Gaussian ridge along the x axis."""
    # Rescale the unit interval so the centre (0.5, 0.5) maps to (0, 0).
    u = 5.0 - 10.0 * x
    v = 5.0 - 10.0 * y
    g1 = np.exp(-u * u / 2)
    g2 = np.exp(-v * v / 2)
    return g1 + 0.75 * g2 * (1 + g1)
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
    """Four-lobed clover pattern built from logistic sigmoids in x and y."""
    ex = np.exp((10.0 - 20.0 * x) / 3.0)
    ey = np.exp((10.0 - 20.0 * y) / 3.0)
    logitx = 1.0 / (1.0 + ex)
    logity = 1.0 / (1.0 + ey)
    scale = ((20.0 / 3.0) ** 3 * ex * ey) ** 2
    shape = (logitx * logity) ** 5
    return scale * shape * (ex - 2.0 * logitx) * (ey - 2.0 * logity)
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
    """Exponentially damped cosine ripple radiating from (0.5, 0.5)."""
    radius = np.hypot(80 * x - 40.0, 90 * y - 45.)
    return np.exp(-0.04 * radius) * np.cos(0.15 * radius)
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
    """Plots a test surface interpolated linearly over a random triangulation.

    Samples `npoints` random nodes inside `xrange` x `yrange` (fixed RNG seed
    so the generated baseline images are reproducible) and renders either the
    analytic reference surface or its interpolant on an nrange x nrange grid.
    """
    name = 'Linear'
    def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
        self.xrange = xrange
        self.yrange = yrange
        self.nrange = nrange
        self.npoints = npoints
        # Fixed seed keeps the node layout (and hence the images) stable.
        rng = np.random.RandomState(1234567890)
        self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
        self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
        self.tri = Triangulation(self.x, self.y)
    def replace_data(self, dataset):
        """Swap in another dataset's nodes and rebuild the triangulation."""
        self.x = dataset.x
        self.y = dataset.y
        self.tri = Triangulation(self.x, self.y)
    def interpolator(self, func):
        """Return a linear extrapolator for func sampled at the node positions."""
        z = func(self.x, self.y)
        return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
    def plot(self, func, interp=True, plotter='imshow'):
        """Render func (interp=False) or its interpolant (interp=True).

        plotter selects 'imshow' (image) or 'contour' (20 contour levels);
        the triangulation edges are overlaid in translucent black either way.
        """
        if interp:
            # Imaginary step counts make mgrid-style slicing produce
            # self.nrange evenly spaced samples per axis.
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
        else:
            y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            z = func(x, y)
            # Clamp infinities so the colour scale stays usable.
            z = np.where(np.isinf(z), 0.0, z)
        extent = (self.xrange[0], self.xrange[1],
                  self.yrange[0], self.yrange[1])
        fig = plt.figure()
        plt.hot()  # Some like it hot
        if plotter == 'imshow':
            plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
        elif plotter == 'contour':
            Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            plt.contour(np.ravel(X), np.ravel(Y), z, 20)
        x = self.x
        y = self.y
        # Draw every triangulation edge as a faint line segment.
        lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
                                            for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
        ax = plt.gca()
        ax.add_collection(lc)
        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            plt.title('%s: %s' % (func.title, title))
        else:
            plt.title(title)
class NNTester(LinearTester):
    """LinearTester variant that interpolates with natural neighbours."""
    name = 'Natural Neighbors'
    def interpolator(self, func):
        """Return a natural-neighbour extrapolator for func at the node positions."""
        values = func(self.x, self.y)
        return self.tri.nn_extrapolator(values, bbox=self.xrange + self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
    """Generate one image-comparison test per surface in `allfuncs` and
    register each as a module-level test_<name> function via globals()."""
    def make_test(func):
        # Six baseline images per surface: reference / natural-neighbour /
        # linear, each as image and as contour plot.
        filenames = [
            '%s-%s' % (func.__name__, x) for x in
            ['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
        # We only generate PNGs to save disk space -- we just assume
        # that any backend differences are caught by other tests.
        @image_comparison(filenames, extensions=['png'],
                          freetype_version=('2.4.5', '2.4.9'),
                          remove_text=True)
        def reference_test():
            nnt.plot(func, interp=False, plotter='imshow')
            nnt.plot(func, interp=True, plotter='imshow')
            lpt.plot(func, interp=True, plotter='imshow')
            nnt.plot(func, interp=False, plotter='contour')
            nnt.plot(func, interp=True, plotter='contour')
            lpt.plot(func, interp=True, plotter='contour')
        tester = reference_test
        # Rename so the test framework discovers it as test_<surface>.
        tester.__name__ = str('test_%s' % func.__name__)
        return tester
    # Shared testers reused by every generated test (closure over nnt/lpt).
    nnt = NNTester(npoints=1000)
    lpt = LinearTester(npoints=1000)
    for func in allfuncs:
        globals()['test_%s' % func.__name__] = make_test(func)
# Build and register the tests at import time.
make_all_2d_testfuncs()
# 1d and 0d grid tests
# Linear interpolator over the square (0,0)-(10,10) with corner values
# 1, 10, 5 and 2.0; the slicing assertions below depend on these exact values.
ref_interpolator = Triangulation([0,10,10,0],
                                 [0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
    """Slicing along a vertical line at y=1 yields a 2x1 grid of values."""
    values = ref_interpolator[3:6:2j, 1:1:1j]
    assert np.allclose(values, [[1.6], [1.9]], rtol=0)
def test_0d_grid():
    """Slicing a single point yields a 1x1 grid with the interpolated value."""
    value = ref_interpolator[3:3:1j, 1:1:1j]
    assert np.allclose(value, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
    """Plot 1-D slices of the reference interpolator at several fixed y levels."""
    x_range = slice(0.25, 9.75, 20j)
    x = np.mgrid[x_range]
    ax = plt.gca()
    for level in xrange(2, 10, 2):
        plt.plot(x, ref_interpolator[x_range, level:level:1j])
    ax.set_xticks([])
    ax.set_yticks([])
| lgpl-3.0 |
yashsavani/rechorder | makeTrainingSet.py | 1 | 3651 | #!/usr/bin/python
import util
import chordKMeans
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import random
from sklearn import svm
from operator import itemgetter
'''This file generates a training set to be used in our SVM,
consisting of a list of previous-n -> current cluster pairings.
arguments:
- a sequence of songs
'''
# Number of preceding bars whose cluster labels form one SVM feature vector.
N_PREVIOUS_BARS = 5
# Number of k-means chord clusters (also the one-hot width per bar).
kMeans = 7
BEATS_PER_BAR = 4
def generate_training_set(n_previous_bars, k_means, beats_per_bar, midiFiles, centroidVectors):
    """Build an SVM training set from MIDI files.

    Each MIDI file is reduced to a sequence of k-means cluster labels (one per
    bar). Every window of n_previous_bars + 1 consecutive labels becomes one
    example: the first n_previous_bars labels, one-hot encoded, are the
    features and the final label is the target.

    Returns the zip of (feature_rows, targets), matching the original API.
    """
    classification_sequences = []
    for midiFile in midiFiles:
        barLists = util.getNGramBarList(midiFile, n=beats_per_bar)
        bestBarList = barLists[0]
        this_sequence = []  # The sequence of cluster numbers for the current song
        for i, bar in enumerate(bestBarList):
            closestCentroid = chordKMeans.getClosestCentroidFromVector(centroidVectors, bar.getKMeansFeatures())
            this_sequence.append(closestCentroid)
        classification_sequences.append(this_sequence)
    output = []
    for sequence in classification_sequences:
        # Slide a window of size n_previous_bars + 1 over the label sequence:
        # the first n_previous_bars entries are the history, the last the target.
        for i in range(0, len(sequence) - n_previous_bars):
            history_numbers = sequence[i:i + n_previous_bars]
            current = sequence[i + n_previous_bars]
            history = []
            for j in range(n_previous_bars):
                # One-hot encode each historical cluster id (k_means slots each).
                history.extend([int(history_numbers[j] == k) for k in range(k_means)])
            output.append([history, current])
    # (Removed an unused `svm.LinearSVC()` instance that was constructed and
    # immediately discarded here -- training happens in get_decision_function.)
    xy = zip(*output)
    return xy
def argmax(l):
    """Return the index of the largest element of *l* (first one wins ties)."""
    best_index, _best_value = max(enumerate(l), key=itemgetter(1))
    return best_index
def get_decision_function(xy, accuracy=None):
    """Fit a linear SVM on (features, labels) and return a decision function.

    The returned callable maps a feature table to predicted class indices
    (argmax over the SVM's per-class confidence scores). Training-set accuracy
    is printed and, if *accuracy* is a list, appended to it.
    """
    lin_clf = svm.LinearSVC()
    lin_clf.fit(xy[0], xy[1])

    def decision(featureTable):
        # Pick, per row, the class with the highest decision-function score.
        return [argmax(confidence) for confidence in lin_clf.decision_function(featureTable)]

    predicted_y = decision(xy[0])
    n_correct = 0
    n_wrong = 0
    for prediction, actual in zip(predicted_y, xy[1]):
        if prediction == actual:
            n_correct += 1
        else:
            n_wrong += 1
    # Pre-formatted single strings print the identical text under both
    # Python 2's print statement and Python 3 (the old py2-only statements
    # would not even parse on Python 3).
    print('****-----------Results of testing on training set-----------')
    print('n_correct : %d' % n_correct)
    print('n_wrong : %d' % n_wrong)
    print('n_total : %d' % len(xy[0]))
    print('accuracy:  %s' % (float(n_correct) / (n_correct + n_wrong)))
    # BUGFIX (idiom): identity comparison with None should use `is not None`.
    if accuracy is not None:
        accuracy.append(float(n_correct) / (n_correct + n_wrong))
    return decision
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "Please give me some MIDI files."
    else:
        # Cluster all bars across the input files, build the training set,
        # then fit the SVM and evaluate it on its own training data.
        midiFiles = sys.argv[1:]
        centroidVectors, all_classifications = chordKMeans.getFeatureCentroids(midiFiles, numCentroids=kMeans, beatsPerBar=BEATS_PER_BAR)
        xy = generate_training_set(N_PREVIOUS_BARS, kMeans, BEATS_PER_BAR, midiFiles, centroidVectors)
        decision_function = get_decision_function(xy)
        predicted_y = decision_function(xy[0])
        n_correct = 0
        n_wrong = 0
        #print predicted_y
        #print xy[1]
        # NOTE(review): this accuracy tally duplicates the report already
        # printed inside get_decision_function -- likely notebook residue.
        for prediction, actual in zip(predicted_y, xy[1]):
            if prediction == actual:
                n_correct+=1
            else:
                n_wrong+=1
        print '****-----------Results of testing on training set-----------'
        print 'n_correct :', n_correct
        print 'n_wrong :', n_wrong
        print 'n_total :', len(xy[0])
        print "accuracy: ", float(n_correct) / (n_correct + n_wrong)
| mit |
knights-lab/SHOGUN | docs/shear_results_fix.py | 2 | 2157 | # usage: python me.py \
# alignment.burst.otu.txt db.tax sheared_bayes.txt
import os
import sys
import csv
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
# Stream the (large) alignment table row by row, keeping only the non-zero
# cells, and assemble them directly into a sparse CSR matrix.
with open(sys.argv[1], 'r') as inf:
    csv_inf = csv.reader(inf, delimiter="\t")
    columns = next(csv_inf)
    # Map each sample column name to its position (first column is row names).
    columns = dict(zip(columns[1:], range(len(columns))))
    indptr = [0]
    indices = np.array([], dtype=int)
    data = np.array([], dtype=int)
    names = []
    for ix, row in enumerate(csv_inf):
        if ix % 1000 == 0:
            print(ix)
        names.append(row[0])
        np_row = np.array(row[1:], dtype=int)
        # NOTE(review): temp_indx is a one-element *list* containing a boolean
        # mask; np_row[temp_indx] and np.where(temp_indx)[1] work via 2-D
        # broadcasting of the mask -- fragile, confirm on newer numpy.
        temp_indx = [np_row > 0]
        data = np.concatenate((data, np_row[temp_indx]))
        indices = np.concatenate((indices, np.where(temp_indx)[1]))
        indptr.append(indices.shape[0])
# Transpose so rows are samples and columns are reference sequences.
csr = csr_matrix((data, indices, indptr), dtype=int).T
# Second argument: two-column TSV mapping sequence name -> taxonomy string.
with open(sys.argv[2]) as inf:
    csv_inf = csv.reader(inf, delimiter='\t')
    name2taxonomy = dict(csv_inf)
cols_tax = [name2taxonomy[name] for name in names]
# Sample names had their first '.' rewritten to '_' upstream; undo to look up.
rows_tax = [name2taxonomy[_.replace(".", "_", 1)] for _ in sorted(columns, key=columns.get)]
def index_lca(str1, str2):
    """Depth of the lowest common ancestor of two semicolon-delimited lineages.

    Returns the index of the first taxonomic level at which the two strings
    disagree, or 8 when every compared level matches.
    """
    for depth, (level1, level2) in enumerate(zip(str1.split(';'), str2.split(';'))):
        if level1 != level2:
            return depth
    return 8
# dat[i, d] counts hits for sample i whose LCA with the reference sits at depth d
# (columns 0..7 = kingdom..strain, column 8 = identical lineages).
dat = np.zeros((len(rows_tax), 9), dtype=int)
for i, row_name in enumerate(rows_tax):
    row = csr.getrow(i)
    for j, indx in enumerate(row.indices):
        dat[i, index_lca(rows_tax[i], cols_tax[indx])] += row.data[j]
print(str(dat[:, 0].sum()))
df = pd.DataFrame(dat, index=rows_tax)
df['sum'] = dat.sum(axis=1)
# Drop the depth-0 column before writing the per-sample table.
df.drop(0, axis=1, inplace=True)
df.to_csv(sys.argv[3], header=False, sep='\t')
uniqueness_rate_per_level = np.zeros(8, dtype=float)
for i in range(0, 8):
    # Take the sum of those columns
    num_hits = df.iloc[:, i].sum()
    # Total number of possible hits
    total_hits = df['sum'].sum()
    # Uniqueness Rate
    uniqueness_rate_per_level[i] = num_hits/total_hits
levels = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
# NOTE(review): this bare list(zip(...)) is a no-op outside a notebook.
list(zip(levels, uniqueness_rate_per_level))
print(uniqueness_rate_per_level.sum())
| agpl-3.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardSoftShear/Normal_Load/Sigma_n_1/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
# Global style: large fonts and heavy tick marks for print-quality figures.
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
# Element output rows 9 and 6 hold normal stress and strain; negate so
# compression plots as positive.
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
# NOTE(review): the capitalised `Linewidth=` kwarg relies on lenient keyword
# handling in the pinned matplotlib; the canonical spelling is `linewidth=`.
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
# NOTE(review): plt.hold() was removed in matplotlib 3.0 -- this script
# requires an older matplotlib (holding is the default behaviour since 2.0).
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
mahak/spark | python/pyspark/pandas/tests/plot/test_frame_plot.py | 15 | 4733 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.pandas.plot import TopNPlotBase, SampledPlotBase, HistogramPlotBase
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DataFramePlotTest(PandasOnSparkTestCase):
    """Tests for pandas-on-Spark DataFrame plotting helpers (top-N limiting,
    sampling, and histogram binning)."""
    @classmethod
    def setUpClass(cls):
        """Pin the plotting options the assertions below depend on."""
        super().setUpClass()
        set_option("plotting.max_rows", 2000)
        set_option("plotting.sample_ratio", None)
    @classmethod
    def tearDownClass(cls):
        """Restore default plotting options so other test classes are unaffected."""
        reset_option("plotting.max_rows")
        reset_option("plotting.sample_ratio")
        super().tearDownClass()
    def test_missing(self):
        """Unsupported plot kinds must raise PandasNotImplementedError."""
        psdf = ps.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
        unsupported_functions = ["box", "hexbin"]
        for name in unsupported_functions:
            with self.assertRaisesRegex(
                PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented".format(name)
            ):
                getattr(psdf.plot, name)()
    def test_topn_max_rows(self):
        """get_top_n truncates to the configured plotting.max_rows (2000)."""
        pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
        psdf = ps.from_pandas(pdf)
        data = TopNPlotBase().get_top_n(psdf)
        self.assertEqual(len(data), 2000)
    def test_sampled_plot_with_ratio(self):
        """With an explicit sample_ratio, roughly that fraction of rows is kept."""
        with option_context("plotting.sample_ratio", 0.5):
            pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
            psdf = ps.from_pandas(pdf)
            data = SampledPlotBase().get_sampled(psdf)
            self.assertEqual(round(len(data) / 2500, 1), 0.5)
    def test_sampled_plot_with_max_rows(self):
        """Without a ratio, frames within max_rows are kept (near) whole."""
        # 'plotting.max_rows' is 2000
        pdf = pd.DataFrame(np.random.rand(2000, 4), columns=["a", "b", "c", "d"])
        psdf = ps.from_pandas(pdf)
        data = SampledPlotBase().get_sampled(psdf)
        self.assertEqual(round(len(data) / 2000, 1), 1)
    def test_compute_hist_single_column(self):
        """Histogram bins and counts for one column match numpy's linspace bins."""
        psdf = ps.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
        )
        expected_bins = np.linspace(1, 50, 11)
        bins = HistogramPlotBase.get_bins(psdf[["a"]].to_spark(), 10)
        expected_histogram = np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1])
        histogram = HistogramPlotBase.compute_hist(psdf[["a"]], bins)[0]
        self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
        self.assert_eq(pd.Series(expected_histogram, name="a"), histogram, almost=True)
    def test_compute_hist_multi_columns(self):
        """Shared bins are computed over all columns; counts are per column."""
        expected_bins = np.linspace(1, 50, 11)
        psdf = ps.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
                "b": [50, 50, 30, 30, 30, 24, 10, 5, 4, 3, 1],
            }
        )
        bins = HistogramPlotBase.get_bins(psdf.to_spark(), 10)
        self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
        expected_histograms = [
            np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1]),
            np.array([4, 1, 0, 0, 1, 3, 0, 0, 0, 2]),
        ]
        histograms = HistogramPlotBase.compute_hist(psdf, bins)
        expected_names = ["a", "b"]
        for histogram, expected_histogram, expected_name in zip(
            histograms, expected_histograms, expected_names
        ):
            self.assert_eq(
                pd.Series(expected_histogram, name=expected_name), histogram, almost=True
            )
if __name__ == "__main__":
    import unittest
    # Re-import the module's tests so unittest discovery sees them by name.
    from pyspark.pandas.tests.plot.test_frame_plot import *  # noqa: F401
    try:
        # Prefer the XML runner (CI report output) when it is installed.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to unittest's default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
smartkit/COVITAS | V1.0_ngix:apache_mosquitto_RESTful_Solr_LIRE/octo-ninja/rest-pandas/OctoNinja/app/views.py | 1 | 1324 | # views.py
from rest_pandas import PandasView
from .models import TimeSeries
class TimeSeriesView(PandasView):
model = TimeSeries
# In response to get(), the underlying Django REST Framework ListAPIView
# will load the default queryset (self.model.objects.all()) and then pass
# it to the following function.
def filter_queryset(self, qs):
# At this point, you can filter queryset based on self.request or other
# settings (useful for limiting memory usage)
return qs
# Then, the included PandasSerializer will serialize the queryset into a
# simple list of dicts (using the DRF ModelSerializer). To customize
# which fields to include, subclass PandasSerializer and set the
# appropriate ModelSerializer options. Then, set the serializer_class
# property on the view to your PandasSerializer subclass.
# Next, the PandasSerializer will load the ModelSerializer result into a
# DataFrame and pass it to the following function on the view.
def transform_dataframe(self, dataframe):
# Here you can transform the dataframe based on self.request
# (useful for pivoting or computing statistics)
return dataframe
# Finally, the included Renderers will process the dataframe into one of
# the output formats below. | unlicense |
bhillmann/gingivere | tests/test_lr.py | 2 | 1117 | from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import StratifiedKFold
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from tests import shelve_api
XX, yy = shelve_api.load('lr')
X = XX[2700:]
y = yy[2700:]
clf = LinearRegression(normalize=True)
skf = StratifiedKFold(y, n_folds=2)
for train_index, test_index in skf:
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# clf.fit(X_train, y_train)
y_true, y_pred = y_test, clf.predict(X_test)
for i, num in enumerate(y_pred):
if num < 0.0:
y_pred[i] = 0.0
continue
elif num > 1.0:
y_pred[i] = 1.0
continue
print(classification_report(np.around(y_true), np.around(y_pred)))
print()
print(roc_auc_score(y_true, y_pred))
print()
| mit |
tosolveit/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)

import numpy as np

from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint

from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier

# get some data (8x8 digit images flattened to 64 features, 10 classes)
digits = load_digits()
X, y = digits.data, digits.target

# build a classifier (n_estimators kept small to bound runtime of the search)
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
    """Print the ``n_top`` best search results, ranked by mean CV score.

    ``grid_scores`` entries expose ``parameters``,
    ``mean_validation_score`` and ``cv_validation_scores``, and sort on
    index 1 (the mean validation score).
    """
    ranked = sorted(grid_scores, key=lambda entry: entry[1], reverse=True)
    for rank, score in enumerate(ranked[:n_top], start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
              score.mean_validation_score,
              np.std(score.cv_validation_scores)))
        print("Parameters: {0}".format(score.parameters))
        print("")
# specify parameters and distributions to sample from
# (randomized search draws each candidate from these distributions)
param_dist = {"max_depth": [3, None],
              "max_features": sp_randint(1, 11),
              "min_samples_split": sp_randint(1, 11),
              "min_samples_leaf": sp_randint(1, 11),
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
                                   n_iter=n_iter_search)

start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
      " parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)

# use a full grid over all parameters
# (same space as above, but every combination is tried: 2*3*3*3*2*2 = 216)
param_grid = {"max_depth": [3, None],
              "max_features": [1, 3, 10],
              "min_samples_split": [1, 3, 10],
              "min_samples_leaf": [1, 3, 10],
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}

# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()

grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
      % (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
equialgo/scikit-learn | sklearn/datasets/__init__.py | 5 | 3683 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Public API of sklearn.datasets: keep this list in sync with the imports above.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'fetch_kddcup99',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_breast_cancer',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)

import matplotlib.pyplot as plt

from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles

# One figure with a 3x2 grid of generated 2D datasets.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)

plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)

plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.show()
| bsd-3-clause |
COSMOGRAIL/COSMOULINE | pipe/5_pymcs_psf_scripts/6b_handcheck_psf_NU.py | 1 | 7562 | from Tkinter import *
from tkMessageBox import *
#~ try:
#~ import ImageTk
#~ import Image
#~ except:
from PIL import ImageTk
from PIL import Image
execfile("../config.py")
from kirbybase import KirbyBase, KBError
from variousfct import *
from readandreplace_fct import *
# import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import star
import os
"""
You are looking at the most important script of all the pipe.
Be sure to have written the right path for your config.py in the execfile and the right psfkey in it.
There are 4 functions, one for each button. skip() writes in the skiplist the name of the image you are looking
at and then displays the next one. keep() and previous() simply show respectively the next or previous picture.
Only skip() and keep() have the ability to resize the picture if they are asked to, previous() doesn't need that
since the resize is permanent (So don't do the skiplist looking the picture backwards, or just decomment the
resizing part in this function). quit() checks if all the pictures have the same size, prints the number of the
last picture you saw and puts it in the skiplist, then it closes the window.
"""
# Python 2 script. Select the image records to review, depending on whether
# this run is a test, an update, or a full pass over the database.
db = KirbyBase()
if thisisatest:
    print "This is a test run."
    imagesdict = db.select(imgdb, ['gogogo','treatme','testlist'], [True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
if update:
    print "This is an update."
    imagesdict = db.select(imgdb, ['gogogo','treatme','updating'], [True, True, True], returnType='dict', sortFields=['setname', 'mjd'])
    # Throwaway "COUCOU" plot, presumably to grab the operator's attention
    # before the interactive check -- TODO confirm this is intentional.
    plt.figure(figsize=(8,5))
    plt.scatter(0,0, label='COUCOU')
    plt.xlabel('COUCOU !', fontsize=15)
    plt.ylabel('COUCOU !', fontsize=15)
    plt.legend(loc=6, fontsize=50)
    plt.suptitle('COUCOU !!')
    plt.show()
    notify(computer, withsound, "Hey Ho ! I need your input here. Look at the PSF of your new images and add the ones you don't like to your skiplist !")
else:
    imagesdict = db.select(imgdb, ['gogogo','treatme'], [True, True], returnType='dict', sortFields=['setname', 'mjd'])

# As the map function didn't want to work, I made my ersatz.
# Collect the image names in database order.
images = []
for i,image in enumerate(imagesdict):
    images.append(image['imgname'])

# Interactive configuration: starting index and optional on-disk resize of
# the PNGs (the resize is permanent -- it overwrites the files).
nb= raw_input("Which image do you want to start from ? (For the first image write 0 ...) ")
i = int(nb)
resize = raw_input("Do you want to resize ? (yes/no) This is useful if your PSF uses over 6 stars otherwise it just slows the program ")
# Dimension of the window if you decide to resize
if resize == 'yes':
    dimension = raw_input("What dimension ?(1 for 1200/950, 2 for 1900/1150, 3 for 1600/968, 4 to enter your dimension")
    if dimension == '1':
        width = 1200
        height = 950
    elif dimension == '2':
        width = 1900
        height = 1150
    elif dimension == '3':
        width = 1600
        height = 968
    elif dimension == '4':
        width = int(raw_input("Width ?"))
        height = int(raw_input("Height ?"))

t = Tk()

# Make sure the skiplist file exists before we append to it.
if os.path.isfile(psfkicklist):
    print "The psfkicklist already exists :"
else:
    cmd = "touch " + psfkicklist
    os.system(cmd)
    print "I have just touched the psfkicklist for you :"
print psfkicklist
def incri():
global i
i += 1
return i
def decri():
global i
i-= 1
return i
#Useful to see wether or not the picture you want to put in the skiplist is already there
skipl = open(psfkicklist, "r")
liste = skipl.read()
colonne = liste.split("\n")
skipl.close()
def skip():
    # Button callback: append the current image to the skiplist (after
    # confirmation), then advance the display to the next image.
    global t
    global image
    global psfkicklist
    global skiplist
    global colonne
    if askyesno("Confirmation of order", "Are you sure you want to put that image in the skiplist?"):
        if (images[i] in colonne):
            # Already recorded in the skiplist read at startup; do not
            # append a duplicate.  NOTE(review): images skiplisted during
            # this session are not added to `colonne`, so duplicates within
            # one session are still possible -- verify.
            showwarning("Could you think before clicking ?" , "Already in the skiplist, I knew you had Alzheimer !")
        else :
            skiplist = open(psfkicklist, "a")
            skiplist.write("\n" + images[i])
            skiplist.close()
            incri()
            if resize == "yes":
                # Permanent on-disk resize of the next PNG before display.
                im4= Image.open(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
                im4=im4.resize((width,height), Image.ANTIALIAS)
                im4.save(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
                myimg = workdir+'/'+psfkey+"_png/"+images[i]+".png"
            else:
                myimg = workdir+'/'+psfkey+"_png/"+images[i]+".png"
            # NOTE(review): Tkinter PhotoImage objects must be kept alive
            # by a Python reference; `new_photoimage` is local -- confirm
            # the label keeps displaying after garbage collection.
            new_photoimage = ImageTk.PhotoImage(file=myimg)
            image = myimg
            w.config(image = new_photoimage)
            decompte.config(text = str(i) + '/'+ str(len(images)-1))
            # NOTE(review): widget.after() normally takes a delay in ms as
            # first argument; passing the callback alone looks suspicious.
            w.after(skip)
    else:
        showwarning("gaga","Stop bothering me ")
def keep():
    # Button callback: keep the current image (no skiplist write) and
    # advance the display to the next one.
    global t
    global image
    incri()
    if resize == "yes":
        # Permanent on-disk resize of the next PNG before display.
        im3= Image.open(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
        im3=im3.resize((width,height), Image.ANTIALIAS)
        im3.save(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
        myimg = workdir+'/'+psfkey+"_png/"+images[i]+".png"
    else:
        myimg = workdir+'/'+psfkey+"_png/"+images[i]+".png"
    # NOTE(review): local PhotoImage reference -- see note in skip().
    new_photoimage = ImageTk.PhotoImage(file=myimg)
    image = myimg
    w.config(image = new_photoimage)
    decompte.config(text = str(i) + '/'+ str(len(images)-1))
    w.after(keep)
def previous():
    # Button callback: step back one image.  Unlike skip()/keep() this does
    # not resize, because any resize already happened when the image was
    # first shown (see module docstring).
    global t
    global image
    decri()
    #if resize == "yes":
    #    im2= Image.open("/home/epfl/paic/Desktop/cosmouline/data/"+psfkey+"_png/"+str(images[i])+".png")
    #    im2=im2.resize((width,height), Image.ANTIALIAS)
    #    im2.save("/home/epfl/paic/Desktop/cosmouline/data/"+psfkey+"_png/"+str(images[i])+".png")
    #    myimg = "/home/epfl/paic/Desktop/cosmouline/data/"+psfkey+"_png/"+images[i]+".png"
    #else:
    myimg = workdir+'/'+psfkey+"_png/"+images[i]+".png"
    new_photoimage = ImageTk.PhotoImage(file=myimg)
    image = myimg
    w.config(image = new_photoimage)
    decompte.config(text = str(i) + '/'+ str(len(images)-1))
    w.after(previous)
liste2 = []  # List of the sizes of all the images
liste3 = []  # List of the images that don't have the same size
def quit():
    # Button callback: record the stopping index in the skiplist (as a
    # comment line), check that all PNGs share the first image's size,
    # report any mismatches, and close the window.
    global t
    global i
    print "You stopped at the ", i, "th image, remember that if you want to come back ! (I wrote that in comment of your skiplist in case you have Alzheimer)"
    skiplist = open(psfkicklist, "a")
    skiplist.write("\n #" + str(i) )
    skiplist.close()
    # NOTE(review): these loops rebind the *global* i (declared above),
    # clobbering the saved position -- harmless only because we destroy
    # the window right after.
    for i,elem in enumerate(images):
        im=Image.open(workdir+'/'+psfkey+'_png/'+str(images[i])+'.png')
        liste2.append(im.size)
    # NOTE(review): hard-coded 783 -- raises IndexError if fewer images
    # are loaded, and silently ignores images beyond index 782.  Should
    # presumably be range(1, len(images)).
    for i in range(1,783):
        if liste2[i]!=liste2[0]:
            liste3.append(images[i])
    if liste3 == []:
        print 'All the images have the same size : ' + str(liste2[0])
    else:
        print str(len(liste3))+" images do not have the same size as the first image ("+str(liste2[0])+"), here is the list :"
        for i, elem in enumerate(liste3):
            print str(elem)
    t.destroy()
# Prepare the first image (resizing it on disk if requested) and build the
# Tk window: image label, four action buttons and a position counter.
if resize == "yes":
    im1= Image.open(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
    im1=im1.resize((width,height), Image.ANTIALIAS)
    im1.save(workdir+'/'+psfkey+"_png/"+str(images[i])+".png")
    image = workdir+'/'+psfkey+"_png/"+str(images[i])+".png"
else:
    image = workdir+'/'+psfkey+"_png/"+str(images[i])+".png"

frame = Frame(t)
frame.pack()
photoimage = ImageTk.PhotoImage(file=image)
w = Label(t, image = photoimage)
w.pack()

bouton1 = Button(t, text ='Skiplist and next', command=skip)
bouton2 = Button(t, text ='Keep it and go next', command=keep)
bouton3 = Button(t, text = 'Previous', command=previous)
bouton4 = Button(t, text = 'Quit ', command = quit)
decompte = Label(t, text = str(i) + '/' + str(len(images)-1))
bouton1.pack()
bouton2.pack()
bouton3.pack()
bouton4.pack()
decompte.pack()
# place() after pack() pins the buttons along the top edge of the window.
bouton1.place(relx = 0.45, rely = 0.01)
bouton2.place(relx = 0.6, rely = 0.01)
bouton3.place(relx = 0.3, rely = 0.01)
bouton4.place(relx = 0.85, rely = 0.01)
decompte.place(relx = 0.1, rely = 0.01)
t.mainloop()
| gpl-3.0 |
mblondel/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 7 | 6830 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
# Shared 1D fixtures: noisy-free target f, training inputs X (column
# vector), off-grid query points X2, and training targets y.
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """
    MLE estimation of a one-dimensional Gaussian Process model.
    Check random start optimization.
    Test the interpolating property: predictions at the training points
    must reproduce y exactly with (near) zero MSE, and the MSE at the
    off-grid points X2 must stay bounded (atol=10).
    """
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=random_start, verbose=False).fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
                and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    """
    MLE estimation of a two-dimensional Gaussian Process model accounting for
    anisotropy. Check random start optimization.
    Test the interpolating property, and that the optimized anisotropic
    length-scales theta_ stay within the given [thetaL, thetaU] bounds.
    """
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()

    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
    assert_true(np.all(gp.theta_ >= thetaL))  # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU))  # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    """
    MLE estimation of a two-dimensional Gaussian Process model accounting for
    anisotropy. Check random start optimization.
    Test the GP interpolation for 2D output (targets stacked as two
    identical columns).
    """
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
    # Fitting 2 samples against 3 targets must raise ValueError.
    gp = GaussianProcess()
    gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
    """Rerun the 1D and 2D interpolation checks for each built-in
    correlation model that can be selected by its string name."""
    checks = (test_1d, test_2d, test_2d_2d)
    for corr_name in ('absolute_exponential', 'squared_exponential',
                      'cubic', 'linear'):
        for check in checks:
            check(regr='constant', corr=corr_name, random_start=random_start)
def test_ordinary_kriging():
    """Rerun the interpolation checks with fixed regression weights
    (beta0), i.e. Ordinary Kriging, for linear and quadratic trends."""
    cases = (
        (test_1d, 'linear', [0., 0.5]),
        (test_1d, 'quadratic', [0., 0.5, 0.5]),
        (test_2d, 'linear', [0., 0.5, 0.5]),
        (test_2d, 'quadratic', [0., 0.5, 0.5, 0.5, 0.5, 0.5]),
        (test_2d_2d, 'linear', [0., 0.5, 0.5]),
        (test_2d_2d, 'quadratic', [0., 0.5, 0.5, 0.5, 0.5, 0.5]),
    )
    for check, regr_name, weights in cases:
        check(regr=regr_name, beta0=weights)
def test_no_normalize():
    # With normalization disabled the GP must still interpolate the
    # training data exactly.
    gp = GaussianProcess(normalize=False).fit(X, y)
    y_pred = gp.predict(X)
    assert_true(np.allclose(y_pred, y))
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the reduced likelihood function of the optimal theta.
    """
    n_samples, n_features = 50, 3
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        # Allow a float32-epsilon slack for numerical noise in the comparison.
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf
def test_mse_solving():
    # test the MSE estimate to be sane.
    # non-regression test for ignoring off-diagonals of feature covariance,
    # testing with nugget that renders covariance useless, only
    # using the mean function, with low effective rank of data
    gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                         thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                         optimizer='Welch', regr="linear", random_state=0)

    X, y = make_regression(n_informative=3, n_features=60, noise=50,
                           random_state=0, effective_rank=1)

    gp.fit(X, y)
    # The mean predictive variance must stay well below the noise scale.
    assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/py-seaborn/package.py | 5 | 1184 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySeaborn(PythonPackage):
    """Seaborn: statistical data visualization.

    Seaborn is a library for making attractive and informative statistical
    graphics in Python. It is built on top of matplotlib and tightly
    integrated with the PyData stack, including support for numpy and pandas
    data structures and statistical routines from scipy and statsmodels."""

    homepage = "http://seaborn.pydata.org/"
    url      = "https://pypi.io/packages/source/s/seaborn/seaborn-0.7.1.tar.gz"

    # sha256 checksums pin the exact sdist contents for reproducible builds.
    version('0.9.0', sha256='76c83f794ca320fb6b23a7c6192d5e185a5fcf4758966a0c0a54baee46d41e2f')
    version('0.7.1', sha256='fa274344b1ee72f723bab751c40a5c671801d47a29ee9b5e69fcf63a18ce5c5d')

    # Build-time only dependency.
    depends_on('py-setuptools', type='build')
    # Runtime stack seaborn is built on.
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
| lgpl-2.1 |
KennethPierce/pylearnk | pylearn2/optimization/test_batch_gradient_descent.py | 5 | 6290 | from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
import theano.tensor as T
from pylearn2.utils import sharedX
import numpy as np
from theano import config
from theano.printing import min_informative_str
def test_batch_gradient_descent():
    """ Verify that batch gradient descent works by checking that
    it minimizes a quadratic function f(x) = x^T A x + b^T x + c
    correctly for several sampled values of A, b, and c.
    The ground truth minimizer is x = np.linalg.solve(A,-b)"""

    # Python 2 code (print statements, xrange).
    n = 3

    # Symbolic inputs for the quadratic objective.
    A = T.matrix(name = 'A')
    b = T.vector(name = 'b')
    c = T.scalar(name = 'c')

    x = sharedX( np.zeros((n,)) , name = 'x')

    half = np.cast[config.floatX](0.5)

    obj = half * T.dot(T.dot(x,A),x)+T.dot(b,x)+c

    minimizer = BatchGradientDescent(
                    objective = obj,
                    params = [ x],
                    inputs = [ A, b, c])

    num_samples = 3

    rng = np.random.RandomState([1,2,3])

    for i in xrange(num_samples):
        # Sample a well-conditioned SPD matrix A.
        # NOTE(review): 1.5*n is a float; modern numpy requires integer
        # dimensions for randn -- confirm against the numpy version in use.
        A = np.cast[config.floatX](rng.randn(1.5*n,n))
        A = np.cast[config.floatX](np.dot(A.T,A))
        A += np.cast[config.floatX](np.identity(n) * .02)
        b = np.cast[config.floatX](rng.randn(n))
        c = np.cast[config.floatX](rng.randn())
        x.set_value(np.cast[config.floatX](rng.randn(n)))

        # Closed-form minimizer of the quadratic: gradient Ax + b = 0.
        analytical_x = np.linalg.solve(A,-b)

        actual_obj = minimizer.minimize(A,b,c)
        actual_x = x.get_value()

        # Check that the value returned by the minimize method
        # is the objective function value at the parameters
        # chosen by the minimize method
        cur_obj = minimizer.obj(A,b,c)
        assert np.allclose(actual_obj, cur_obj)

        x.set_value(analytical_x)
        analytical_obj = minimizer.obj(A,b,c)

        # make sure the objective function is accurate to first 4 digits
        condition1 = not np.allclose(analytical_obj, actual_obj)
        condition2 = np.abs(analytical_obj-actual_obj) >= 1e-4 * \
                np.abs(analytical_obj)

        if (config.floatX == 'float64' and condition1) \
                or (config.floatX == 'float32' and condition2):
            # Failure branch: dump diagnostics, then fail via assert False
            # at the end of this branch.
            print 'objective function value came out wrong on sample ',i
            print 'analytical obj', analytical_obj
            print 'actual obj',actual_obj

            """
            The following section of code was used to verify that numerical
            error can make the objective function look non-convex

            print 'Checking for numerically induced non-convex behavior'

            def f(x):
                return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c

            x.set_value(actual_x)
            minimizer._compute_grad(A,b,c)
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()

            x = actual_x.copy()
            prev = f(x)
            print prev
            step_size = 1e-4
            x += step_size * d
            cur = f(x)
            print cur
            cur_sgn = np.sign(cur-prev)
            flip_cnt = 0
            for i in xrange(10000):
                x += step_size * d
                prev = cur
                cur = f(x)
                print cur
                prev_sgn = cur_sgn
                cur_sgn = np.sign(cur-prev)
                if cur_sgn != prev_sgn:
                    print 'flip'
                    flip_cnt += 1
                    if flip_cnt > 1:
                        print "Non-convex!"

                        from matplotlib import pyplot as plt
                        y = []

                        x = actual_x.copy()
                        for j in xrange(10000):
                            y.append(f(x))
                            x += step_size * d

                        plt.plot(y)
                        plt.show()

                        assert False
            print 'None found'
            """

            #print 'actual x',actual_x
            #print 'A:'
            #print A
            #print 'b:'
            #print b
            #print 'c:'
            #print c

            # Verify the symbolic gradient against the analytic gradient of
            # the quadratic at the point the minimizer stopped at.
            x.set_value(actual_x)
            minimizer._compute_grad(A,b,c)
            x_grad = minimizer.param_to_grad_shared[x]
            actual_grad = x_grad.get_value()
            correct_grad = 0.5 * np.dot(A,x.get_value())+ 0.5 * \
                    np.dot(A.T, x.get_value()) +b
            if not np.allclose(actual_grad, correct_grad):
                print 'gradient was wrong at convergence point'
                print 'actual grad: '
                print actual_grad
                print 'correct grad: '
                print correct_grad
                print 'max difference: ',
                np.abs(actual_grad-correct_grad).max()
                assert False

            # Probe the line search: exact optimal step length along the
            # normalized gradient direction for a quadratic.
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()
            step_len = ( np.dot(b,d) + 0.5 * np.dot(d,np.dot(A,actual_x)) \
                    + 0.5 * np.dot(actual_x,np.dot(A,d)) ) \
                    / np.dot(d, np.dot(A,d))
            g = np.dot(A,actual_x)+b
            deriv = np.dot(g,d)
            print 'directional deriv at actual', deriv
            print 'optimal step_len', step_len
            optimal_x = actual_x - d * step_len
            g = np.dot(A,optimal_x) + b
            deriv = np.dot(g,d)
            print 'directional deriv at optimal: ',deriv
            x.set_value(optimal_x)
            print 'obj at optimal: ',minimizer.obj(A,b,c)
            print 'eigenvalue range:'
            val, vec = np.linalg.eig(A)
            print (val.min(),val.max())
            print 'condition number: ',(val.max()/val.min())
            assert False

if __name__ == '__main__':
    test_batch_gradient_descent()
| bsd-3-clause |
zackriegman/pydnn | examples/plankton/plankton.py | 1 | 11695 | __author__ = 'isaac'
from pydnn import neuralnet as nn
from pydnn import preprocess as pp
from pydnn import tools
from pydnn import data
from pydnn import img_util
import numpy as np
import pandas as pd
from scipy.misc import imread
import os
from os.path import join
import time
# Module-level configuration and datasets, shared by all functions below.
config = tools.load_config('PLANKTON_CONFIG', __file__, 'plankton.conf')['plankton']
train_set = data.DirectoryLabeledImageSet(config['input_train'], config['dtype'])
test_set = data.UnlabeledImageSet(config['input_test'])
def write_submission_csv_file(file_name, probs, image_file_names):
    # Write a gzip-compressed Kaggle submission: one row per test image,
    # one probability column per class (column order from train_set labels).
    import gzip
    df = pd.DataFrame(data=probs, index=image_file_names, columns=train_set.get_labels())
    df.index.name = 'image'
    # NOTE(review): gzip.open(..., 'w') is a binary handle; to_csv writing
    # text to it works on Python 2 but breaks on Python 3 ('wt' needed).
    with gzip.open(file_name, 'w') as outFile:
        df.to_csv(outFile)
def generate_submission_file(net, name, num=None):
    # Predict class probabilities for the (up to 130400-image) test set in
    # batches and write them to a gzipped submission CSV.
    #
    # net: trained network exposing predict({'images': ...}).
    # name: output file path.
    # num: number of test images; None or 130400 means the full set,
    #      processed in 8 batches of 16300.
    if num is None:
        num = 130400
    if num < 130400:
        batch_size = num
        num_batches = 1
    else:
        batch_size = 16300
        num_batches = 8
    probabilities = []
    files = []
    dotter = tools.Dot()
    print('generating probabilities...')
    for i in range(num_batches):
        fns, images, = test_set.build(i * batch_size,
                                      (i + 1) * batch_size)
        _, probs = net.predict({'images': images})
        probabilities.append(probs)
        files += fns
        dotter.dot(str(i) + ' ')
    dotter.stop()
    probabilities = np.row_stack(probabilities)
    print('writing csv file...')
    write_submission_csv_file(name, probabilities, files)
def load_net_and_generate_submission_file(net_name, submission_name):
    """Load a pickled network from disk and write its submission CSV."""
    print('loading net')
    trained_net = nn.load(net_name)
    generate_submission_file(trained_net, submission_name)
# n = 'e0??'
# load_net_and_generate_submission_file(n + '_best_net.pkl', n + '_sub_best.csv.gz')
# load_net_and_generate_submission_file(n + '_final_net.pkl', n + '_sub_final.csv.gz')
def write_confusion_matrices_to_csv_files(experiment, num_images, matrices):
    # For each of the train/valid/test splits write two CSVs:
    #   <experiment>_conf_mtrx_<set>.csv  -- full label-by-label confusion matrix
    #   <experiment>_mistakes_<set>.csv   -- per-file right/wrong label pairs
    #
    # matrices: iterable of (matrix, mistakes) pairs, one per split, where
    # mistakes yields (file_index, right_index, wrong_index) triples.
    set_names = ['train', 'valid', 'test']
    labels = train_set.get_labels()
    files, given_labels = zip(*train_set.get_files(num_images))
    for (matrix, mistakes), set_name in zip(matrices, set_names):
        df = pd.DataFrame(matrix, index=labels, columns=labels)
        df.to_csv(join(config['output'], experiment + '_conf_mtrx_' + set_name + '.csv'))

        file_indices, right_indices, wrong_indices = zip(*mistakes)
        file_names = [files[index] for index in file_indices]
        # NOTE(review): right_indices is unpacked but never used; the true
        # labels are looked up from given_labels via file_indices instead.
        right_labels = [given_labels[index] for index in file_indices]
        wrong_labels = [labels[index] for index in wrong_indices]
        df = pd.DataFrame({'wrong': wrong_labels, 'right': right_labels},
                          index=file_names)
        df.to_csv(join(config['output'], experiment + '_mistakes_' + set_name + '.csv'))
def make_confusion_matrix_from_saved_network(e):
    """Rebuild the experiment's data split, load its best saved network and
    write confusion-matrix / mistakes CSV files for all three splits.

    e: experiment configuration (uses name, num_images, batch_size and the
    train/valid/test split percentages).
    """
    print('making confusion matrices...')
    data = train_set.build(e.num_images)
    data = pp.split_training_data(data, e.batch_size, e.train_pct, e.valid_pct, e.test_pct)
    net = nn.load(join(config['input_post'], e.name + '_best_net.pkl'))
    net.preprocessor.set_data(data)
    # Bug fix: write_confusion_matrices_to_csv_files takes (experiment,
    # num_images, matrices); the num_images argument was missing here,
    # raising a TypeError (cf. the correct call in run_experiment).
    write_confusion_matrices_to_csv_files(e.name, e.num_images,
                                          net.get_confusion_matrices())
    print('...done making confusion matrices')
def analyze_confusion_matrix(matrix_file):
    # Interactive analysis of a saved confusion-matrix CSV: ranks the 121
    # plankton classes by error count and error rate, prints summaries and
    # shows sample images of the worst classes and their confusions.
    n = 121  # number of classes in the competition
    rng = np.random.RandomState(123)
    x = pd.read_csv(matrix_file, index_col=0)
    data = np.index_exp[:n, :n]
    x['total'] = x.iloc[data].sum(axis=1)
    total_predictions = x['total'].sum()
    # Zero the diagonal so row sums count only misclassifications.
    values = x.iloc[data].values  # values can sometimes return a copy
    np.fill_diagonal(values, 0)   # so must save, zero and reassign
    x.iloc[data] = values         # (I've discovered after some confusion)
    x['bad'] = x.iloc[data].sum(axis=1)
    total_bad = x['bad'].sum()
    x['pct_bad'] = x['bad'] / x['total']
    # NOTE(review): DataFrame.sort/Series.sort are long deprecated; use
    # sort_values with modern pandas.
    top_by_num = x.sort('total', ascending=False)[0:10].index.values
    worst_by_num = x.sort('bad', ascending=False)[0:10].index.values
    worst_by_num_ct = x.sort('bad', ascending=False)[0:10].values
    worst_by_pct = x.sort('pct_bad', ascending=False)[0:10].index.values
    worst_by_pct_ct = x.sort('pct_bad', ascending=False)[0:10].values

    print("total predictions: {}".format(total_predictions))
    print("total bad predictions: {}".format(total_bad))
    print("most common classes (regardless of error rate): " + str(top_by_num))

    def most_confused_with(c):
        # Return up to 10 (class, count) pairs that class c was most often
        # predicted as, stopping at the first zero count.
        # get the row, and only the class values (not the generated columns)
        row = x.loc[c]
        row = row.iloc[:n]
        row.sort(ascending=False)
        last_non_zero = 10
        # print(row.iloc[:10].values)
        for index, z in enumerate(row.iloc[:last_non_zero].values):
            if z <= 0:
                last_non_zero = index
                break
        # return the top classes for the row
        return zip(row.iloc[:last_non_zero].index.values, row.iloc[:last_non_zero].values)

    def print_worst(classes):
        # Print per-class error statistics relative to the overall error.
        for c in classes:
            c_total = x.loc[c, 'total']
            c_bad = x.loc[c, 'bad']
            c_contribution_to_error = float(c_bad) / total_bad
            c_fair_share_of_error = c_total / total_predictions
            print('\nclass {}:'.format(c))
            print('total predictions: {}'.format(c_total))
            print('total bad predictions: {}'.format(c_bad))
            print('fair share of error: {:.3%}'.format(c_fair_share_of_error))
            print('contribution to error: {:.3%} ({:.3f} time fair share)'.format(
                c_contribution_to_error, c_contribution_to_error / c_fair_share_of_error))
            print('most often confused with' + str(most_confused_with(c)))

    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import matplotlib.image as mpimg

    def show_worst(worst):
        # For each class show a figure: one row of 5 random training images
        # for the class itself, then a row per confused-with class.
        def add_row(directory, count, index):
            first = True
            for i in range(5):
                sub = fig.add_subplot(11, 5, index)
                fn = rng.choice(tools.get_files(join(config['input_train'], directory),
                                                cache=True))
                image = mpimg.imread(fn)
                plt.imshow(image, interpolation='none', cmap=cm.Greys_r)
                if first:
                    title = '{}: {} ({}x{})'.format(
                        directory, count, image.shape[0], image.shape[1])
                    first = False
                else:
                    title = '({}x{})'.format(image.shape[0], image.shape[1])
                sub.set_title(title, size=10)
                sub.axis('off')
                index += 1
            return index
        for c in worst:
            fig = plt.figure()
            pos = add_row(c, x.loc[c, 'bad'], 1)
            for i, num in most_confused_with(c):
                pos = add_row(i, num, pos)
            plt.show()

    print("---------- worst classes by number -----------")
    print_worst(worst_by_num)
    show_worst(worst_by_num)
    print("---------- worst classes by percent ----------")
    print_worst(worst_by_pct)
    # NOTE(review): this shows worst_by_num again in the "percent" section;
    # presumably show_worst(worst_by_pct) was intended -- confirm.
    show_worst(worst_by_num)

    print('might also be useful to look at the post transformed images to gain'
          'insight into why the net is not able to recognize them well')
    print('also remember to look at whether confusion is symmetrical (i.e. if A '
          'is frequently confused for B, is B also frequently confused for A?)')
    print('at some point might be worth looking at the specific image that were'
          'incorrectly classified, but to begin with Im just looking for the most'
          'important trends (classes with the most confusion) and individual images'
          'shouldnt tell me too much')
def run_experiment(e):
    """Run one training experiment ``e`` end to end: build the dataset,
    construct preprocessor and network, train, and write submission and
    confusion-matrix files. Returns the network (best checkpoint).

    NOTE(review): depends on module-level ``train_set``, ``pp``, ``nn``,
    ``config``, ``tools`` and the submission helpers defined elsewhere in
    this file. The extent of the ``finally`` block below was ambiguous in
    the flattened source — confirm against the original file.
    """
    print('############## {} ################'.format(e.name))
    print('start time: ' + tools.now())
    rng = np.random.RandomState(e.rng_seed)
    data = train_set.build(e.num_images)
    data = pp.split_training_data(data, e.batch_size, e.train_pct, e.valid_pct, e.test_pct)
    # Each resizer class takes different constructor arguments, so dispatch
    # on the configured class.
    if e.resizer == pp.StochasticStretchResizer:
        resizer = e.resizer(rng, e.stochastic_stretch_range)
    elif e.resizer in [pp.ThresholdBoxPreserveAspectRatioResizer,
                       pp.ThresholdBoxStretchResizer]:
        resizer = e.resizer(e.box_threshold)
    elif e.resizer in [pp.ContiguousBoxPreserveAspectRatioResizer,
                       pp.ContiguousBoxStretchResizer]:
        resizer = e.resizer(e.contiguous_box_threshold)
    else:
        resizer = e.resizer()
    preprocessor = e.preprocessor(data, e.image_shape, resizer, rng, config['dtype'])
    net = nn.NN(preprocessor=preprocessor,
                channel='images',
                num_classes=121,
                batch_size=e.batch_size,
                rng=rng,
                activation=e.activation,
                name=e.name,
                output_dir=config['output'])
    e.build_net(net)
    try:
        net.train(
            updater=e.learning_rule,
            epochs=e.epochs,
            final_epochs=e.final_epochs,
            l1_reg=e.l1_reg,
            l2_reg=e.l2_reg)
    finally:
        # Always announce the end, even if training was interrupted.
        print('Experiment "{}" ended'.format(e.name))
    print('generating probabilities based on final network...')
    generate_submission_file(net,
                             join(config['output'], e.name + '_submission_final.csv.gz'),
                             e.num_submission_images)
    # Reload the best (checkpointed) network and generate its submission too.
    net = nn.load(join(config['output'], e.name + '_best_net.pkl'))
    print('generating probabilities based on best network...')
    generate_submission_file(net,
                             join(config['output'], e.name + '_submission_best.csv.gz'),
                             e.num_submission_images)
    print('generating and writing confusion matrix based on best network...')
    net.preprocessor.set_data(data)
    write_confusion_matrices_to_csv_files(e.name, e.num_images,
                                          net.get_confusion_matrices())
    print('end time: ' + tools.now())
    return net
def average_submissions(in_files, weights=None):
    """Average several saved probability submissions (optionally weighted)
    and write the result as a new timestamped gzipped submission CSV.

    in_files: file names under config['input_post']; each is a gzipped CSV
        with a header row, an id column, and 121 probability columns.
    weights: optional per-file weights, forwarded to np.average.
    """
    import gzip
    subs = []
    for f in in_files:
        print('loading ' + f)
        with gzip.open(join(config['input_post'], f), 'r') as inFile:
            subs.append(np.loadtxt(
                fname=inFile,
                dtype=config['dtype'],
                delimiter=',',
                skiprows=1,  # skip the CSV header row
                usecols=range(1, 122)))  # drop id column, keep 121 prob cols
    # avg = np.mean(subs, axis=0)
    avg = np.average(subs, axis=0, weights=weights)
    out_file = (join(config['input_post'], 'avg_probs_' +
                     time.strftime("%Y-%m-%d--%H-%M-%S") + '.csv.gz'))
    print('saving...')
    write_submission_csv_file(out_file, avg, test_set.get_files())
    print('done')
def show_mistakes(mistakes_file):
    """For each row of a mistakes CSV (index = image path, columns
    'right'/'wrong'), display the misclassified image together with 9 random
    images of its true class and 10 of the wrongly predicted class."""
    mistakes = pd.read_csv(mistakes_file, index_col=0)
    for index, row in mistakes.iterrows():
        # The offending image itself, looked up in its true-class folder.
        images = [imread(join(config['input_train'], row['right'],
                              os.path.basename(index)))]
        right_images = np.random.choice(
            tools.get_files(join(config['input_train'], row['right'])),
            9, replace=False)
        wrong_images = np.random.choice(
            tools.get_files(join(config['input_train'], row['wrong'])),
            10, replace=False)
        images.extend([imread(fn) for fn in right_images])
        images.extend([imread(fn) for fn in wrong_images])
        print(os.path.basename(index), row['right'], row['wrong'])
        # 4x5 grid: 1 mistake + 9 right + 10 wrong = 20 tiles.
        img_util.show_images_as_tiles(images, size=(128, 128), canvas_dims=(4, 5))
#show_mistakes(SUBMISSION_DIR + '/results/e075_mistakes_valid.csv') | mit |
fspaolo/scikit-learn | sklearn/utils/tests/test_random.py | 20 | 3872 | from __future__ import division
import numpy as np
from scipy.misc import comb as combinations
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    # An unknown method name must raise ValueError.
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")

def test_sample_without_replacement_algorithms():
    # Run the shared checks against every supported sampling method.
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
    for m in methods:
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None):
            # Closure over `m`: safe because the wrapper is consumed before
            # `m` advances to the next method.
            return sample_without_replacement(n_population, n_samples,
                                              method=m,
                                              random_state=random_state)
        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples: impossible request must raise
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)
    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))
    # n_population >= n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))
    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # Mirrors test_random.py from python-core: for every 0 <= k <= N the
    # sample must have length k and contain only unique values below N.
    population_size = 100
    for sample_size in range(population_size + 1):
        drawn = sample_without_replacement(population_size, sample_size)
        assert_equal(len(drawn), sample_size)
        distinct = np.unique(drawn)
        assert_equal(np.size(distinct), sample_size)
        assert_true(np.all(distinct < population_size))
    # Degenerate case: empty population, empty sample.
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible permutations
    n_population = 10
    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000
    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting
        # the number of permutations. However, it works with sampling
        # algorithms that do not provide a random permutation of the subset
        # of integers.
        n_expected = combinations(n_population, n_samples, exact=True)
        output = {}
        for i in range(n_trials):
            # frozenset ignores ordering, so we count distinct subsets.
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None
            if len(output) == n_expected:
                break
        else:
            # Trials exhausted without observing every possible subset.
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(output), n_expected))
| bsd-3-clause |
RayMick/scikit-learn | sklearn/cluster/tests/test_k_means.py | 63 | 26190 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# Shared fixtures: non centered, sparse centers used throughout the tests.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
# Gaussian blobs around the centers, plus a sparse CSR copy of the data.
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
    # Fitting on uint8 data must warn about dtype conversion, but predict()
    # must still agree with the fitted labels_.
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    X = (X * 10).astype(np.uint8)
    km = KMeans(n_init=1).fit(X)
    pred_x = assert_warns(DataConversionWarning, km.predict, X)
    assert_array_equal(km.labels_, pred_x)

def test_labels_assignment_and_inertia():
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    labels_gold = - np.ones(n_samples, dtype=np.int)
    mindist = np.empty(n_samples)
    mindist.fill(np.infty)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())
    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)
    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
    # Check that dense and sparse minibatch update give the same results
    rng = np.random.RandomState(42)
    old_centers = centers + rng.normal(size=centers.shape)
    new_centers = old_centers.copy()
    new_centers_csr = old_centers.copy()
    counts = np.zeros(new_centers.shape[0], dtype=np.int32)
    counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
    x_squared_norms = (X ** 2).sum(axis=1)
    x_squared_norms_csr = row_norms(X_csr, squared=True)
    buffer = np.zeros(centers.shape[1], dtype=np.double)
    buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    x_mb_squared_norms = x_squared_norms[:10]
    x_mb_squared_norms_csr = x_squared_norms_csr[:10]
    # step 1: compute the dense minibatch update
    old_inertia, incremental_diff = _mini_batch_step(
        X_mb, x_mb_squared_norms, new_centers, counts,
        buffer, 1, None, random_reassign=False)
    assert_greater(old_inertia, 0.0)
    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(
        X_mb, x_mb_squared_norms, new_centers)
    assert_greater(new_inertia, 0.0)
    assert_less(new_inertia, old_inertia)
    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers - old_centers) ** 2)
    assert_almost_equal(incremental_diff, effective_diff)
    # step 2: compute the sparse minibatch update
    old_inertia_csr, incremental_diff_csr = _mini_batch_step(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
        buffer_csr, 1, None, random_reassign=False)
    assert_greater(old_inertia_csr, 0.0)
    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
    assert_greater(new_inertia_csr, 0.0)
    assert_less(new_inertia_csr, old_inertia_csr)
    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
    assert_almost_equal(incremental_diff_csr, effective_diff)
    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_array_almost_equal(new_centers, new_centers_csr)
    assert_almost_equal(incremental_diff, incremental_diff_csr)
    assert_almost_equal(old_inertia, old_inertia_csr)
    assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # Shared helper: sanity-check a fitted (MiniBatch)KMeans estimator
    # against the module-level blob fixtures.
    # check that the number of clusters centers and distinct labels match
    # the expectation
    centers = km.cluster_centers_
    assert_equal(centers.shape, (n_clusters, n_features))
    labels = km.labels_
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(km.inertia_, 0.0)
    # check error on dataset being too small
    assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
    km = KMeans(init="k-means++", n_clusters=n_clusters,
                random_state=42).fit(X)
    _check_fitted_model(km)

def test_k_means_new_centers():
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    bad_centers = np.array([[+0, 1, 0, 0],
                            [.2, 0, .2, .2],
                            [+0, 0, 0, 0]])
    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)

@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
    if sys.version_info[:2] < (3, 4):
        raise SkipTest(
            "Possible multi-process bug with some BLAS under Python < 3.4")
    km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
                random_state=42).fit(X)
    _check_fitted_model(km)

def test_k_means_precompute_distances_flag():
    # check that a warning is raised if the precompute_distances flag is not
    # supported
    km = KMeans(precompute_distances="wrong")
    assert_raises(ValueError, km.fit, X)

def test_k_means_plus_plus_init_sparse():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)

def test_k_means_random_init():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X)
    _check_fitted_model(km)

def test_k_means_random_init_sparse():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)

def test_k_means_plus_plus_init_not_precomputed():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)

def test_k_means_random_init_not_precomputed():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)

def test_k_means_perfect_init():
    # Initializing exactly at the true centers must yield a perfect model.
    km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
                n_init=1)
    km.fit(X)
    _check_fitted_model(km)

def test_k_means_n_init():
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    # two regression tests on bad n_init argument
    # previous bug: n_init <= 0 threw non-informative TypeError (#3858)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)

def test_k_means_fortran_aligned_data():
    # Check the KMeans will work well, even if X is a fortran-aligned data.
    X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
    centers = np.array([[0, 0], [0, 1]])
    labels = np.array([0, 1, 1])
    km = KMeans(n_init=1, init=centers, precompute_distances=False,
                random_state=42)
    km.fit(X)
    assert_array_equal(km.cluster_centers_, centers)
    assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X)
    _check_fitted_model(mb_k_means)

def test_mb_kmeans_verbose():
    # smoke test: verbose fitting must not crash; output is discarded
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        mb_k_means.fit(X)
    finally:
        sys.stdout = old_stdout

def test_mb_k_means_plus_plus_init_sparse_matrix():
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X_csr)
    _check_fitted_model(mb_k_means)

def test_minibatch_init_with_large_k():
    mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number clusters is larger
    # than the init_size
    assert_warns(RuntimeWarning, mb_k_means.fit, X)

def test_minibatch_k_means_random_init_dense_array():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X)
    _check_fitted_model(mb_k_means)

def test_minibatch_k_means_random_init_sparse_csr():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X_csr)
    _check_fitted_model(mb_k_means)

def test_minibatch_k_means_perfect_init_dense_array():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X)
    _check_fitted_model(mb_k_means)

def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
    # explicit array init with n_init > 1 is wasteful and must warn
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=10)
    assert_warns(RuntimeWarning, mb_k_means.fit, X)

def test_minibatch_k_means_perfect_init_sparse_csr():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
    # check if identical initial clusters are reassigned
    # also a regression test for when there are more desired reassignments than
    # samples.
    zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
                                 init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
    # do the same with batch-size > X.shape[0] (regression test)
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
                                 random_state=42, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)

def test_minibatch_sensible_reassign_partial_fit():
    # same check as above, through the partial_fit (online) API
    zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
    for i in range(100):
        mb_k_means.partial_fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should not longer be good
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     random_state=42)
        mb_k_means.fit(this_X)
        score_before = mb_k_means.score(this_X)
        try:
            old_stdout = sys.stdout
            sys.stdout = StringIO()
            # Turn on verbosity to smoke test the display code
            _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                             mb_k_means.cluster_centers_,
                             mb_k_means.counts_,
                             np.zeros(X.shape[1], np.double),
                             False, distances=np.zeros(X.shape[0]),
                             random_reassign=True, random_state=42,
                             reassignment_ratio=1, verbose=True)
        finally:
            sys.stdout = old_stdout
        # wholesale reassignment must hurt the score
        assert_greater(score_before, mb_k_means.score(this_X))
    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     init=centers.copy(),
                                     random_state=42, n_init=1)
        mb_k_means.fit(this_X)
        clusters_before = mb_k_means.cluster_centers_
        # Turn on verbosity to smoke test the display code
        _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                         mb_k_means.cluster_centers_,
                         mb_k_means.counts_,
                         np.zeros(X.shape[1], np.double),
                         False, distances=np.zeros(X.shape[0]),
                         random_reassign=True, random_state=42,
                         reassignment_ratio=1e-15)
        assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
    # Test for the case that the number of clusters to reassign is bigger
    # than the batch_size
    n_samples = 550
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(n_samples, 10))
    # Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that this values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
    n_clusters = 550
    MiniBatchKMeans(n_clusters=n_clusters,
                    batch_size=100,
                    init_size=n_samples,
                    random_state=42).fit(X)

def test_sparse_mb_k_means_callable_init():
    def test_init(X, k, random_state):
        # callable init: always return the fixture centers (3 of them)
        return centers
    # Small test to check that giving the wrong number of centers
    # raises a meaningful error
    assert_raises(ValueError,
                  MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
    # Now check that the fit actually works
    mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
                                 random_state=42).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)
    # compute the labeling on the complete dataset
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)

def test_minibatch_default_init_size():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 batch_size=10, random_state=42,
                                 n_init=1).fit(X)
    # default init_size is 3 * batch_size
    assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
    _check_fitted_model(mb_k_means)

def test_minibatch_tol():
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
                                 random_state=42, tol=.01).fit(X)
    _check_fitted_model(mb_k_means)

def test_minibatch_set_init_size():
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 init_size=666, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size, 666)
    # the effective init size is capped at the number of available samples
    assert_equal(mb_k_means.init_size_, n_samples)
    _check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
    km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, km.fit, X)

def test_mini_match_k_means_invalid_init():
    km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, km.fit, X)

def test_k_means_copyx():
    # Check if copy_x=False returns nearly equal X after de-centering.
    my_X = X.copy()
    km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    km.fit(my_X)
    _check_fitted_model(km)
    # check if my_X is centered
    assert_array_almost_equal(my_X, X)

def test_k_means_non_collapsed():
    # Check k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turns make the clustering
    # dependent of the numerical unstabilities.
    my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
    array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
    km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
    km.fit(my_X)
    # centers must not been collapsed
    assert_equal(len(np.unique(km.labels_)), 3)
    centers = km.cluster_centers_
    assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
    assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
    assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
    km = KMeans(n_clusters=n_clusters, random_state=42)
    km.fit(X)
    # sanity check: predict centroid labels
    pred = km.predict(km.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # sanity check: re-predict labeling for training set samples
    pred = km.predict(X)
    assert_array_equal(pred, km.labels_)
    # re-predict labels for training set using fit_predict
    pred = km.fit_predict(X)
    assert_array_equal(pred, km.labels_)

def test_score():
    # more iterations must not yield a worse (lower) score
    km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
    s1 = km1.fit(X).score(X)
    km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
    s2 = km2.fit(X).score(X)
    assert_greater(s2, s1)

def test_predict_minibatch_dense_input():
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # sanity check: re-predict labeling for training set samples
    pred = mb_k_means.predict(X)
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)

def test_predict_minibatch_kmeanspp_init_sparse_input():
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
                                 n_init=10).fit(X_csr)
    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)

def test_predict_minibatch_random_init_sparse_input():
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
                                 n_init=10).fit(X_csr)
    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
    # list-of-lists, int32 array and sparse inputs must all give the same
    # (perfect) clustering of this tiny 2-cluster dataset
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    X_int = np.array(X_list, dtype=np.int32)
    X_int_csr = sp.csr_matrix(X_int)
    init_int = X_int[:2]
    fitted_models = [
        KMeans(n_clusters=2).fit(X_list),
        KMeans(n_clusters=2).fit(X_int),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # mini batch kmeans is very unstable on such a small dataset hence
        # we use many inits
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_list),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int_csr),
    ]
    expected_labels = [0, 1, 1, 0, 0, 1]
    scores = np.array([v_measure_score(expected_labels, km.labels_)
                       for km in fitted_models])
    assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
    km = KMeans(n_clusters=n_clusters)
    km.fit(X)
    X_new = km.transform(km.cluster_centers_)
    for c in range(n_clusters):
        # distance of a centroid to itself is zero ...
        assert_equal(X_new[c, c], 0)
        for c2 in range(n_clusters):
            # ... and strictly positive to every other centroid
            if c != c2:
                assert_greater(X_new[c, c2], 0)

def test_fit_transform():
    # fit(X).transform(X) and fit_transform(X) must agree exactly
    X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
    X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
    assert_array_equal(X1, X2)

def test_n_init():
    # Check that increasing the number of init increases the quality
    n_runs = 5
    n_init_range = [1, 5, 10]
    inertia = np.zeros((len(n_init_range), n_runs))
    for i, n_init in enumerate(n_init_range):
        for j in range(n_runs):
            km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
                        random_state=j).fit(X)
            inertia[i, j] = km.inertia_
    inertia = inertia.mean(axis=1)
    failure_msg = ("Inertia %r should be decreasing"
                   " when n_init is increasing.") % list(inertia)
    for i in range(len(n_init_range) - 1):
        assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
    # test calling the k_means function directly
    # catch output
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))
    labels = labels
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)
    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=centers)
    # to many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)

def test_x_squared_norms_init_centroids():
    """Test that x_squared_norms can be None in _init_centroids"""
    from sklearn.cluster.k_means_ import _init_centroids
    X_norms = np.sum(X**2, axis=1)
    precompute = _init_centroids(
        X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
    # passing the precomputed norms must not change the chosen centroids
    assert_array_equal(
        precompute,
        _init_centroids(X, 3, "k-means++", random_state=0))
| bsd-3-clause |
winklerand/pandas | pandas/tests/frame/test_to_csv.py | 1 | 45142 | # -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import pytest
from numpy import nan
import numpy as np
from pandas.compat import (lmap, range, lrange, StringIO, u)
from pandas.core.common import _all_none
from pandas.errors import ParserError
from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp,
date_range, read_csv, compat, to_datetime)
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
ensure_clean,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(index_col=0, parse_dates=True)
params.update(**kwargs)
return pd.read_csv(path, **params)
    def test_from_csv_deprecation(self):
        # see gh-17812: DataFrame.from_csv is deprecated in favor of read_csv
        # and must emit a FutureWarning while still round-tripping the frame.
        with ensure_clean('__tmp_from_csv_deprecation__') as path:
            self.tsframe.to_csv(path)
            with tm.assert_produces_warning(FutureWarning):
                depr_recons = DataFrame.from_csv(path)
                assert_frame_equal(self.tsframe, depr_recons)
    def test_to_csv_from_csv1(self):
        # round-trip to_csv -> read_csv under a variety of options
        with ensure_clean('__tmp_to_csv_from_csv1__') as path:
            self.frame['A'][:5] = nan
            self.frame.to_csv(path)
            self.frame.to_csv(path, columns=['A', 'B'])
            self.frame.to_csv(path, header=False)
            self.frame.to_csv(path, index=False)
            # test roundtrip
            self.tsframe.to_csv(path)
            recons = self.read_csv(path)
            assert_frame_equal(self.tsframe, recons)
            self.tsframe.to_csv(path, index_label='index')
            recons = self.read_csv(path, index_col=None)
            # the labeled index becomes an extra data column on re-read
            assert(len(recons.columns) == len(self.tsframe.columns) + 1)
            # no index
            self.tsframe.to_csv(path, index=False)
            recons = self.read_csv(path, index_col=None)
            assert_almost_equal(self.tsframe.values, recons.values)
            # corner case: columns of unequal length (NaN padding)
            dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
                            's2': Series(lrange(2), lrange(2))})
            dm.to_csv(path)
            recons = self.read_csv(path)
            assert_frame_equal(dm, recons)
    def test_to_csv_from_csv2(self):
        # duplicate indexes, MultiIndex and header aliases
        with ensure_clean('__tmp_to_csv_from_csv2__') as path:
            # duplicate index
            df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
                           columns=['x', 'y', 'z'])
            df.to_csv(path)
            result = self.read_csv(path)
            assert_frame_equal(result, df)
            midx = MultiIndex.from_tuples(
                [('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
            df = DataFrame(np.random.randn(3, 3), index=midx,
                           columns=['x', 'y', 'z'])
            df.to_csv(path)
            result = self.read_csv(path, index_col=[0, 1, 2],
                                   parse_dates=False)
            assert_frame_equal(result, df, check_names=False)
            # column aliases
            col_aliases = Index(['AA', 'X', 'Y', 'Z'])
            self.frame2.to_csv(path, header=col_aliases)
            rs = self.read_csv(path)
            xp = self.frame2.copy()
            xp.columns = col_aliases
            assert_frame_equal(xp, rs)
            # wrong number of header aliases must raise
            pytest.raises(ValueError, self.frame2.to_csv, path,
                          header=['AA', 'X'])
def test_to_csv_from_csv3(self):
with ensure_clean('__tmp_to_csv_from_csv3__') as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode='a', header=False)
xp = pd.concat([df1, df2])
rs = pd.read_csv(path, index_col=0)
rs.columns = lmap(int, rs.columns)
xp.columns = lmap(int, xp.columns)
assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with ensure_clean('__tmp_to_csv_from_csv4__') as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = pd.DataFrame({'dt_data': [i * dt for i in range(3)]},
index=pd.Index([i * dt for i in range(3)],
name='dt_index'))
df.to_csv(path)
result = pd.read_csv(path, index_col='dt_index')
result.index = pd.to_timedelta(result.index)
# TODO: remove renaming when GH 10875 is solved
result.index = result.index.rename('dt_index')
result['dt_data'] = pd.to_timedelta(result['dt_data'])
assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self):
# tz, 8260
with ensure_clean('__tmp_to_csv_from_csv5__') as path:
self.tzframe.to_csv(path)
result = pd.read_csv(path, index_col=0, parse_dates=['A'])
converter = lambda c: to_datetime(result[c]).dt.tz_localize(
'UTC').dt.tz_convert(self.tzframe[c].dt.tz)
result['B'] = converter('B')
result['C'] = converter('C')
assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
chunksize = 5
N = int(chunksize * 2.5)
df = mkdf(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df, cols=None):
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(
cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
assert_series_equal(obj_df, obj_rs)
else:
assert_frame_equal(
obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
_check_df(df, None)
# dupe cols with selection
cols = ['b', 'a']
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
from pandas import NaT
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range('2000', freq='5min', periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with ensure_clean('1.csv') as pth:
df = DataFrame(dict(a=s1, b=s2))
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth)._convert(datetime=True,
coerce=True)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(df, r_dtype=None, c_dtype=None,
rnlvl=None, cnlvl=None, dupe_col=False):
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8',
chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs['header'] = 0
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8', chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
# read_Csv disambiguates the columns by
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[
:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1:]
type_map = dict(i='i', f='f', s='O', u='O', dt='O', p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(_to_uni, recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni, df.index), dtype=r_dtype)
elif r_dtype == 'dt': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(Timestamp, recons.index),
dtype=r_dtype)
df.index = np.array(
lmap(Timestamp, df.index), dtype=r_dtype)
elif r_dtype == 'p':
r_dtype = 'O'
recons.index = np.array(
list(map(Timestamp, to_datetime(recons.index))),
dtype=r_dtype)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())),
dtype=r_dtype)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == 'u':
c_dtype = 'O'
recons.columns = np.array(lmap(_to_uni, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(_to_uni, df.columns), dtype=c_dtype)
elif c_dtype == 'dt':
c_dtype = 'O'
recons.columns = np.array(lmap(Timestamp, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns), dtype=c_dtype)
elif c_dtype == 'p':
c_dtype = 'O'
recons.columns = np.array(
lmap(Timestamp, to_datetime(recons.columns)),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns.to_timestamp()),
dtype=c_dtype)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
N = 100
chunksize = 1000
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
pass
for r_idx_type, c_idx_type in [('i', 'i'), ('s', 's'), ('u', 'dt'),
('p', 'p')]:
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type=r_idx_type,
c_idx_type=c_idx_type),
r_idx_type, c_idx_type)
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=lrange(10)))
_do_test(mkdf(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(mkdf(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2),
rnlvl=2, cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with ensure_clean('__tmp_to_csv_no_index__') as path:
df = DataFrame({'c1': [1, 2, 3], 'c2': [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
df['c3'] = Series([7, 8, 9], dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ['a', 'b', 'c'],
1: ['aa', 'bb', 'cc']})
df['test'] = 'txt'
assert df.to_csv() == df.to_csv(columns=[0, 1, 'test'])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean('__tmp_to_csv_headers__') as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = self.read_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = self.read_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1],
parse_dates=False)
# TODO to_csv drops column name
assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
self.frame.index = old_index
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
# needed if setUp becomes class method
self.tsframe.index = old_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first', 'second']
return DataFrame(np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')], names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1])
assert_frame_equal(df, result)
# column is mi
df = mkdf(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(
path, header=[0, 1, 2, 3], index_col=0)
assert_frame_equal(df, result)
# dup column names?
df = mkdf(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1, 2])
assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert _all_none(*result.columns.names)
result.columns.names = df.columns.names
assert_frame_equal(df, result)
# tupleize_cols=True and index=False
df = _make_frame(True)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True, index=False)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0,
tupleize_cols=True,
index_col=None)
result.columns = df.columns
assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
# column & index are multi-index (compatibility)
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0, index_col=[0, 1],
tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = 'len of {i}, but only 5 lines in file'.format(i=i)
with tm.assert_raises_regex(ParserError, msg):
read_csv(path, header=lrange(i), index_col=0)
# write with cols
with tm.assert_raises_regex(TypeError, 'cannot specify cols '
'with a MultiIndex'):
df.to_csv(path, columns=['foo', 'bar'])
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(',')[2] == '999'
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = self.read_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return ["%s%03d" % (name, i) for i in range(5)]
df_float = DataFrame(np.random.randn(
100, 5), dtype='float64', columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),
dtype='int64', columns=create_cols('int'))
df_bool = DataFrame(True, index=df_float.index,
columns=create_cols('bool'))
df_object = DataFrame('foo', index=df_float.index,
columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=create_cols('date'))
# add in some nans
df_float.loc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = dict()
for n, dtype in [('float', np.float64), ('int', np.int64),
('bool', np.bool), ('object', np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes,
parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(np.random.randn(1000, 30), columns=lrange(
15) + lrange(15), dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index, columns=lrange(3))
df_object = DataFrame('foo', index=df_float.index, columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=lrange(3))
df = pd.concat([df_float, df_int, df_bool, df_object,
df_dt], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0, 1, 2])
df.columns = cols
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ['0.4', '1.4', '2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result, df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N = 10
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
with ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={'a.1': 'a'})
assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({'A': lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = self.read_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
# TODO to_csv drops column name
assert_frame_equal(recons, self.frame, check_names=False)
def test_to_csv_float_format(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
assert buf.getvalue() == expected
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output as when one
# would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_to_csv_compression_gzip(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="gzip")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="gzip", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is gziped
import gzip
f = gzip.open(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_bz2(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="bz2")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="bz2", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is bz2ed
import bz2
f = bz2.BZ2File(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_xz(self):
# GH11852
# use the compression kw in to_csv
tm._skip_if_no_lzma()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="xz")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="xz", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is xzipped
lzma = compat.import_lzma()
f = lzma.open(filename, 'rb')
assert_frame_equal(df, read_csv(f, index_col=0))
f.close()
def test_to_csv_compression_value_error(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
# zip compression is not supported and should raise ValueError
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_csv,
filename, compression="zip")
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
dt_index = self.tsframe.index
datetime_frame = DataFrame(
{'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d')
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = (
datetime_frame_columns.columns
.map(lambda x: x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d')
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with ensure_clean('csv_date_format_with_dst') as path:
# make sure we are not failing on transitions
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10s')]:
time_range = np.array(range(len(i)), dtype='int64')
df = DataFrame({'A': time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/London')
assert_frame_equal(result, df)
# GH11619
idx = pd.date_range('2015-01-01', '2015-12-31',
freq='H', tz='Europe/Paris')
df = DataFrame({'values': 1, 'idx': idx},
index=idx)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/Paris')
result['idx'] = to_datetime(result['idx']).astype(
'datetime64[ns, Europe/Paris]')
assert_frame_equal(result, df)
# assert working
df.astype(str)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_pickle(path)
result = pd.read_pickle(path)
assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame({
'c_string': ['a', 'b,c'],
'c_int': [42, np.nan],
'c_float': [1.0, 3.2],
'c_bool': [True, False],
})
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,"b,c"
"""
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected = """\
"","c_bool","c_float","c_int","c_string"
"0","True","1.0","42.0","a"
"1","False","3.2","","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected = """\
"","c_bool","c_float","c_int","c_string"
0,True,1.0,42.0,"a"
1,False,3.2,"","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE)
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE,
escapechar=None)
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,b!,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='!')
assert result == expected
expected = """\
,c_bool,c_ffloat,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,bf,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='f')
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
df = df.set_index(['a', 'b'])
expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n'
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n'
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", pd.NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n'
assert result == expected
def test_multi_index_header(self):
# see gh-5539
columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2),
("b", 1), ("b", 2)])
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df.columns = columns
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n"
assert result == expected
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/test_combine_concat.py | 2 | 8607 | from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameConcat:
def test_concat_multiple_frames_dtypes(self):
# GH 2759
A = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
index=["foo", "bar", 0, 1],
)
tm.assert_series_equal(results, expected)
def test_concat_multiple_tzs(self):
# GH 12467
# combining datetime tz-aware and naive DataFrames
ts1 = Timestamp("2015-01-01", tz=None)
ts2 = Timestamp("2015-01-01", tz="UTC")
ts3 = Timestamp("2015-01-01", tz="EST")
df1 = DataFrame(dict(time=[ts1]))
df2 = DataFrame(dict(time=[ts2]))
df3 = DataFrame(dict(time=[ts3]))
results = pd.concat([df1, df2]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts1, ts2]), dtype=object)
tm.assert_frame_equal(results, expected)
results = pd.concat([df1, df3]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts1, ts3]), dtype=object)
tm.assert_frame_equal(results, expected)
results = pd.concat([df2, df3]).reset_index(drop=True)
expected = DataFrame(dict(time=[ts2, ts3]))
tm.assert_frame_equal(results, expected)
@pytest.mark.parametrize(
"t1",
[
"2015-01-01",
pytest.param(
pd.NaT,
marks=pytest.mark.xfail(
reason="GH23037 incorrect dtype when concatenating"
),
),
],
)
def test_concat_tz_NaT(self, t1):
# GH 22796
# Concating tz-aware multicolumn DataFrames
ts1 = Timestamp(t1, tz="UTC")
ts2 = Timestamp("2015-01-01", tz="UTC")
ts3 = Timestamp("2015-01-01", tz="UTC")
df1 = DataFrame([[ts1, ts2]])
df2 = DataFrame([[ts3]])
result = pd.concat([df1, df2])
expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
tm.assert_frame_equal(result, expected)
def test_concat_tz_not_aligned(self):
# GH 22796
ts = pd.to_datetime([1, 2]).tz_localize("UTC")
a = pd.DataFrame({"A": ts})
b = pd.DataFrame({"A": ts, "B": ts})
result = pd.concat([a, b], sort=True, ignore_index=True)
expected = pd.DataFrame(
{"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
)
tm.assert_frame_equal(result, expected)
def test_concat_tuple_keys(self):
# GH 14438
df1 = pd.DataFrame(np.ones((2, 2)), columns=list("AB"))
df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
results = pd.concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
expected = pd.DataFrame(
{
"A": {
("bee", "bah", 0): 1.0,
("bee", "bah", 1): 1.0,
("bee", "boo", 0): 2.0,
("bee", "boo", 1): 2.0,
("bee", "boo", 2): 2.0,
},
"B": {
("bee", "bah", 0): 1.0,
("bee", "bah", 1): 1.0,
("bee", "boo", 0): 2.0,
("bee", "boo", 1): 2.0,
("bee", "boo", 2): 2.0,
},
}
)
tm.assert_frame_equal(results, expected)
def test_concat_named_keys(self):
    """Level names on ``keys`` propagate to the result (GH 14252).

    Three scenarios are checked: a named Index as keys, an unnamed Index
    combined with ``names=``, and an unnamed Index with no names at all.
    """
    df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
    # Keys carrying their own name: the name becomes the outer level name.
    index = Index(["a", "b"], name="baz")
    concatted_named_from_keys = pd.concat([df, df], keys=index)
    expected_named = pd.DataFrame(
        {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
        index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
    )
    tm.assert_frame_equal(concatted_named_from_keys, expected_named)
    # Unnamed keys plus an explicit names= argument: same result.
    index_no_name = Index(["a", "b"], name=None)
    concatted_named_from_names = pd.concat(
        [df, df], keys=index_no_name, names=["baz"]
    )
    tm.assert_frame_equal(concatted_named_from_names, expected_named)
    # Neither source provides a name: the outer level stays unnamed.
    concatted_unnamed = pd.concat([df, df], keys=index_no_name)
    expected_unnamed = pd.DataFrame(
        {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
        index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
    )
    tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
def test_concat_axis_parameter(self):
    """All spellings of the ``axis`` argument are equivalent (GH 14369).

    'index'/'rows'/0 stack vertically; 'columns'/1 align side by side;
    an unknown string raises ValueError.  Checked for both DataFrame and
    Series inputs.
    """
    df1 = pd.DataFrame({"A": [0.1, 0.2]}, index=range(2))
    df2 = pd.DataFrame({"A": [0.3, 0.4]}, index=range(2))
    # Index/row/0 DataFrame: vertical stack, original labels repeated
    expected_index = pd.DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
    concatted_index = pd.concat([df1, df2], axis="index")
    tm.assert_frame_equal(concatted_index, expected_index)
    concatted_row = pd.concat([df1, df2], axis="rows")
    tm.assert_frame_equal(concatted_row, expected_index)
    concatted_0 = pd.concat([df1, df2], axis=0)
    tm.assert_frame_equal(concatted_0, expected_index)
    # Columns/1 DataFrame: side-by-side, duplicate column label kept
    expected_columns = pd.DataFrame(
        [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
    )
    concatted_columns = pd.concat([df1, df2], axis="columns")
    tm.assert_frame_equal(concatted_columns, expected_columns)
    concatted_1 = pd.concat([df1, df2], axis=1)
    tm.assert_frame_equal(concatted_1, expected_columns)
    series1 = pd.Series([0.1, 0.2])
    series2 = pd.Series([0.3, 0.4])
    # Index/row/0 Series: one longer Series
    expected_index_series = pd.Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
    concatted_index_series = pd.concat([series1, series2], axis="index")
    tm.assert_series_equal(concatted_index_series, expected_index_series)
    concatted_row_series = pd.concat([series1, series2], axis="rows")
    tm.assert_series_equal(concatted_row_series, expected_index_series)
    concatted_0_series = pd.concat([series1, series2], axis=0)
    tm.assert_series_equal(concatted_0_series, expected_index_series)
    # Columns/1 Series: concatenation upgrades to a two-column DataFrame
    expected_columns_series = pd.DataFrame(
        [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
    )
    concatted_columns_series = pd.concat([series1, series2], axis="columns")
    tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
    concatted_1_series = pd.concat([series1, series2], axis=1)
    tm.assert_frame_equal(concatted_1_series, expected_columns_series)
    # Testing ValueError for an unrecognized axis alias
    with pytest.raises(ValueError, match="No axis named"):
        pd.concat([series1, series2], axis="something")
def test_concat_numerical_names(self):
# #15262 # #12223
df = pd.DataFrame(
{"col": range(9)},
dtype="int32",
index=(
pd.MultiIndex.from_product(
[["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
)
),
)
result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :]))
expected = pd.DataFrame(
{"col": [0, 1, 7, 8]},
dtype="int32",
index=pd.MultiIndex.from_tuples(
[("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
),
)
tm.assert_frame_equal(result, expected)
def test_concat_astype_dup_col(self):
# gh 23049
df = pd.DataFrame([{"a": "b"}])
df = pd.concat([df, df], axis=1)
result = df.astype("category")
expected = pd.DataFrame(
np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
).astype("category")
tm.assert_frame_equal(result, expected)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), "hi"])
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({"date": ind, "test": range(10)})
# it works!
pd.concat([df1, df2_obj])
| bsd-3-clause |
wbinventor/openmc | openmc/data/photon.py | 1 | 43051 | from collections import OrderedDict
from collections.abc import Mapping, Callable
from copy import deepcopy
from io import StringIO
from numbers import Integral, Real
import os
import h5py
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline
from openmc.mixin import EqualityMixin
import openmc.checkvalue as cv
from . import HDF5_VERSION
from .ace import Table, get_metadata, get_table
from .data import ATOMIC_SYMBOL, EV_PER_MEV
from .endf import Evaluation, get_head_record, get_tab1_record, get_list_record
from .function import Tabulated1D
# Electron subshell labels
_SUBSHELLS = [None, 'K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5',
'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2', 'O3',
'O4', 'O5', 'O6', 'O7', 'O8', 'O9', 'P1', 'P2', 'P3', 'P4',
'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11','Q1', 'Q2', 'Q3']
# Maps photoatomic ENDF MT numbers to a (long description, short key) pair.
# The short key is used elsewhere in this module as the HDF5 group name.
_REACTION_NAME = {
    501: ('Total photon interaction', 'total'),
    502: ('Photon coherent scattering', 'coherent'),
    504: ('Photon incoherent scattering', 'incoherent'),
    515: ('Pair production, electron field', 'pair_production_electron'),
    516: ('Total pair production', 'pair_production_total'),
    517: ('Pair production, nuclear field', 'pair_production_nuclear'),
    522: ('Photoelectric absorption', 'photoelectric'),
    526: ('Electro-atomic scattering', 'electro_atomic_scat'),
    527: ('Electro-atomic bremsstrahlung', 'electro_atomic_brem'),
    528: ('Electro-atomic excitation', 'electro_atomic_excit'),
    # MT=534-572 are subshell photoelectric cross sections; the short key
    # is the subshell designator (matches entries in _SUBSHELLS).
    534: ('K (1s1/2) subshell photoelectric', 'K'),
    535: ('L1 (2s1/2) subshell photoelectric', 'L1'),
    536: ('L2 (2p1/2) subshell photoelectric', 'L2'),
    537: ('L3 (2p3/2) subshell photoelectric', 'L3'),
    538: ('M1 (3s1/2) subshell photoelectric', 'M1'),
    539: ('M2 (3p1/2) subshell photoelectric', 'M2'),
    540: ('M3 (3p3/2) subshell photoelectric', 'M3'),
    541: ('M4 (3d3/2) subshell photoelectric', 'M4'),
    542: ('M5 (3d5/2) subshell photoelectric', 'M5'),
    543: ('N1 (4s1/2) subshell photoelectric', 'N1'),
    544: ('N2 (4p1/2) subshell photoelectric', 'N2'),
    545: ('N3 (4p3/2) subshell photoelectric', 'N3'),
    546: ('N4 (4d3/2) subshell photoelectric', 'N4'),
    547: ('N5 (4d5/2) subshell photoelectric', 'N5'),
    548: ('N6 (4f5/2) subshell photoelectric', 'N6'),
    549: ('N7 (4f7/2) subshell photoelectric', 'N7'),
    550: ('O1 (5s1/2) subshell photoelectric', 'O1'),
    551: ('O2 (5p1/2) subshell photoelectric', 'O2'),
    552: ('O3 (5p3/2) subshell photoelectric', 'O3'),
    553: ('O4 (5d3/2) subshell photoelectric', 'O4'),
    554: ('O5 (5d5/2) subshell photoelectric', 'O5'),
    555: ('O6 (5f5/2) subshell photoelectric', 'O6'),
    556: ('O7 (5f7/2) subshell photoelectric', 'O7'),
    557: ('O8 (5g7/2) subshell photoelectric', 'O8'),
    558: ('O9 (5g9/2) subshell photoelectric', 'O9'),
    559: ('P1 (6s1/2) subshell photoelectric', 'P1'),
    560: ('P2 (6p1/2) subshell photoelectric', 'P2'),
    561: ('P3 (6p3/2) subshell photoelectric', 'P3'),
    562: ('P4 (6d3/2) subshell photoelectric', 'P4'),
    563: ('P5 (6d5/2) subshell photoelectric', 'P5'),
    564: ('P6 (6f5/2) subshell photoelectric', 'P6'),
    565: ('P7 (6f7/2) subshell photoelectric', 'P7'),
    566: ('P8 (6g7/2) subshell photoelectric', 'P8'),
    567: ('P9 (6g9/2) subshell photoelectric', 'P9'),
    568: ('P10 (6h9/2) subshell photoelectric', 'P10'),
    569: ('P11 (6h11/2) subshell photoelectric', 'P11'),
    570: ('Q1 (7s1/2) subshell photoelectric', 'Q1'),
    571: ('Q2 (7p1/2) subshell photoelectric', 'Q2'),
    572: ('Q3 (7p3/2) subshell photoelectric', 'Q3')
}
# Compton profiles are read from a pre-generated HDF5 file when they are first
# needed (lazy module-level cache). The dictionary stores an array of electron
# momentum values (at which the profiles are tabulated) with the key 'pz' and
# the profile for each element is a 2D array with shape
# (n_shells, n_momentum_values) stored on the key Z.
_COMPTON_PROFILES = {}

# Scaled bremsstrahlung DCSs are read from a data file provided by Seltzer and
# Berger when they are first needed (lazy module-level cache). The dictionary
# stores an array of n incident electron kinetic energies with key
# 'electron_energies', an array of k reduced photon energies with key
# 'photon_energies', and the cross sections for each element are in a 2D array
# with shape (n, k) stored on the key 'Z'. It also stores data used for
# calculating the density effect correction and stopping power, namely, the
# mean excitation energy with the key 'I', number of electrons per subshell
# with the key 'num_electrons', and binding energies with the key
# 'ionization_energy'.
_BREMSSTRAHLUNG = {}
class AtomicRelaxation(EqualityMixin):
    """Atomic relaxation data.

    This class stores the binding energy, number of electrons, and electron
    transitions possible from ioniziation for each electron subshell of an
    atom. All of the data originates from an ENDF-6 atomic relaxation
    sub-library (NSUB=6). Instances of this class are not normally instantiated
    directly but rather created using the factory method
    :meth:`AtomicRelaxation.from_endf`.

    Parameters
    ----------
    binding_energy : dict
        Dictionary indicating the binding energy in eV (values) for given
        subshells (keys). The subshells should be given as strings, e.g., 'K',
        'L1', 'L2', etc.
    num_electrons : dict
        Dictionary indicating the number of electrons in a subshell when neutral
        (values) for given subshells (keys). The subshells should be given as
        strings, e.g., 'K', 'L1', 'L2', etc.
    transitions : pandas.DataFrame
        Dictionary indicating allowed transitions and their probabilities
        (values) for given subshells (keys). The subshells should be given as
        strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
        a DataFrame with columns indicating the secondary and tertiary subshell,
        the energy of the transition in eV, and the fractional probability of
        the transition.

    Attributes
    ----------
    binding_energy : dict
        Dictionary indicating the binding energy in eV (values) for given
        subshells (keys). The subshells should be given as strings, e.g., 'K',
        'L1', 'L2', etc.
    num_electrons : dict
        Dictionary indicating the number of electrons in a subshell when neutral
        (values) for given subshells (keys). The subshells should be given as
        strings, e.g., 'K', 'L1', 'L2', etc.
    transitions : pandas.DataFrame
        Dictionary indicating allowed transitions and their probabilities
        (values) for given subshells (keys). The subshells should be given as
        strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
        a DataFrame with columns indicating the secondary and tertiary subshell,
        the energy of the transition in eV, and the fractional probability of
        the transition.

    See Also
    --------
    IncidentPhoton

    """

    def __init__(self, binding_energy, num_electrons, transitions):
        self.binding_energy = binding_energy
        self.num_electrons = num_electrons
        self.transitions = transitions

    @property
    def binding_energy(self):
        return self._binding_energy

    @property
    def num_electrons(self):
        return self._num_electrons

    @property
    def subshells(self):
        # Subshell designators present in the data, sorted alphabetically.
        return list(sorted(self.binding_energy.keys()))

    @property
    def transitions(self):
        return self._transitions

    @binding_energy.setter
    def binding_energy(self, binding_energy):
        cv.check_type('binding energies', binding_energy, Mapping)
        for subshell, energy in binding_energy.items():
            cv.check_value('subshell', subshell, _SUBSHELLS)
            cv.check_type('binding energy', energy, Real)
            cv.check_greater_than('binding energy', energy, 0.0, True)
        self._binding_energy = binding_energy

    @num_electrons.setter
    def num_electrons(self, num_electrons):
        cv.check_type('number of electrons', num_electrons, Mapping)
        for subshell, num in num_electrons.items():
            cv.check_value('subshell', subshell, _SUBSHELLS)
            cv.check_type('number of electrons', num, Real)
            cv.check_greater_than('number of electrons', num, 0.0, True)
        self._num_electrons = num_electrons

    @transitions.setter
    def transitions(self, transitions):
        cv.check_type('transitions', transitions, Mapping)
        for subshell, df in transitions.items():
            cv.check_value('subshell', subshell, _SUBSHELLS)
            cv.check_type('transitions', df, pd.DataFrame)
        self._transitions = transitions

    @classmethod
    def from_ace(cls, ace):
        """Generate atomic relaxation data from an ACE file

        Parameters
        ----------
        ace : openmc.data.ace.Table
            ACE table to read from

        Returns
        -------
        openmc.data.AtomicRelaxation
            Atomic relaxation data

        """
        # Create data dictionaries
        binding_energy = {}
        num_electrons = {}
        transitions = {}

        # Get shell designators
        n = ace.nxs[7]
        idx = ace.jxs[11]
        shells = [_SUBSHELLS[int(i)] for i in ace.xss[idx : idx+n]]

        # Get number of electrons for each shell
        idx = ace.jxs[12]
        for shell, num in zip(shells, ace.xss[idx : idx+n]):
            num_electrons[shell] = num

        # Get binding energy for each shell (ACE stores MeV; convert to eV)
        idx = ace.jxs[13]
        for shell, e in zip(shells, ace.xss[idx : idx+n]):
            binding_energy[shell] = e*EV_PER_MEV

        # Get transition table. Each record is 4 words long: secondary
        # subshell, tertiary subshell, transition energy, cumulative
        # probability.
        columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
        idx = ace.jxs[18]
        for i, subi in enumerate(shells):
            n_transitions = int(ace.xss[ace.jxs[15] + i])
            if n_transitions > 0:
                records = []
                for j in range(n_transitions):
                    subj = _SUBSHELLS[int(ace.xss[idx])]
                    subk = _SUBSHELLS[int(ace.xss[idx + 1])]
                    etr = ace.xss[idx + 2]*EV_PER_MEV
                    if j == 0:
                        ftr = ace.xss[idx + 3]
                    else:
                        # Probabilities are cumulative in the ACE table;
                        # difference against the previous record recovers
                        # the per-transition fraction.
                        ftr = ace.xss[idx + 3] - ace.xss[idx - 1]
                    records.append((subj, subk, etr, ftr))
                    idx += 4

                # Create dataframe for transitions
                transitions[subi] = pd.DataFrame.from_records(
                    records, columns=columns)

        return cls(binding_energy, num_electrons, transitions)

    @classmethod
    def from_endf(cls, ev_or_filename):
        """Generate atomic relaxation data from an ENDF evaluation

        Parameters
        ----------
        ev_or_filename : str or openmc.data.endf.Evaluation
            ENDF atomic relaxation evaluation to read from. If given as a
            string, it is assumed to be the filename for the ENDF file.

        Returns
        -------
        openmc.data.AtomicRelaxation
            Atomic relaxation data

        """
        if isinstance(ev_or_filename, Evaluation):
            ev = ev_or_filename
        else:
            ev = Evaluation(ev_or_filename)

        # Atomic relaxation data is always MF=28, MT=533
        if (28, 533) not in ev.section:
            raise IOError('{} does not appear to be an atomic relaxation '
                          'sublibrary.'.format(ev))

        # Determine number of subshells
        file_obj = StringIO(ev.section[28, 533])
        params = get_head_record(file_obj)
        n_subshells = params[4]

        # Create data dictionaries
        binding_energy = {}
        num_electrons = {}
        transitions = {}
        columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']

        # Read data for each subshell
        for i in range(n_subshells):
            params, list_items = get_list_record(file_obj)
            subi = _SUBSHELLS[int(params[0])]
            n_transitions = int(params[5])
            binding_energy[subi] = list_items[0]
            num_electrons[subi] = list_items[1]

            if n_transitions > 0:
                # Read transition data; each transition occupies a 6-word
                # stride starting after the subshell header words.
                records = []
                for j in range(n_transitions):
                    subj = _SUBSHELLS[int(list_items[6*(j+1)])]
                    subk = _SUBSHELLS[int(list_items[6*(j+1) + 1])]
                    etr = list_items[6*(j+1) + 2]
                    ftr = list_items[6*(j+1) + 3]
                    records.append((subj, subk, etr, ftr))

                # Create dataframe for transitions
                transitions[subi] = pd.DataFrame.from_records(
                    records, columns=columns)

        # Return instance of class
        return cls(binding_energy, num_electrons, transitions)

    @classmethod
    def from_hdf5(cls, group):
        """Generate atomic relaxation data from an HDF5 group

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to read from

        Returns
        -------
        openmc.data.AtomicRelaxation
            Atomic relaxation data

        """
        # Create data dictionaries
        binding_energy = {}
        num_electrons = {}
        transitions = {}

        designators = [s.decode() for s in group.attrs['designators']]
        columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
        for shell in designators:
            # Shell group
            sub_group = group[shell]

            # Read subshell binding energy and number of electrons
            if 'binding_energy' in sub_group.attrs:
                binding_energy[shell] = sub_group.attrs['binding_energy']
            if 'num_electrons' in sub_group.attrs:
                num_electrons[shell] = sub_group.attrs['num_electrons']

            # Read transition data
            if 'transitions' in sub_group:
                df = pd.DataFrame(sub_group['transitions'][()],
                                  columns=columns)
                # Replace float indexes back to subshell strings (inverse of
                # the mapping done in to_hdf5)
                df[columns[:2]] = df[columns[:2]].replace(
                    np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
                transitions[shell] = df

        return cls(binding_energy, num_electrons, transitions)

    def to_hdf5(self, group, shell):
        """Write atomic relaxation data to an HDF5 group

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to write to
        shell : str
            The subshell to write data for

        """
        # Write subshell binding energy and number of electrons
        group.attrs['binding_energy'] = self.binding_energy[shell]
        group.attrs['num_electrons'] = self.num_electrons[shell]

        # Write transition data with subshell strings replaced by their
        # integer positions in _SUBSHELLS so the table can be stored as floats
        if shell in self.transitions:
            df = self.transitions[shell].replace(
                _SUBSHELLS, range(len(_SUBSHELLS)))
            group.create_dataset('transitions', data=df.values.astype(float))
class IncidentPhoton(EqualityMixin):
    r"""Photon interaction data.

    This class stores photo-atomic, photo-nuclear, atomic relaxation,
    Compton profile, stopping power, and bremsstrahlung data assembled from
    different sources. To create an instance, the factory method
    :meth:`IncidentPhoton.from_endf` can be used. To add atomic relaxation or
    Compton profile data, set the :attr:`IncidentPhoton.atomic_relaxation` and
    :attr:`IncidentPhoton.compton_profiles` attributes directly.

    Parameters
    ----------
    atomic_number : int
        Number of protons in the target nucleus

    Attributes
    ----------
    atomic_number : int
        Number of protons in the target nucleus
    atomic_relaxation : openmc.data.AtomicRelaxation or None
        Atomic relaxation data
    bremsstrahlung : dict
        Dictionary of bremsstrahlung data with keys 'I' (mean excitation energy
        in [eV]), 'num_electrons' (number of electrons in each subshell),
        'ionization_energy' (ionization potential of each subshell),
        'electron_energy' (incident electron kinetic energy values in [eV]),
        'photon_energy' (ratio of the energy of the emitted photon to the
        incident electron kinetic energy), and 'dcs' (cross section values in
        [b]). The cross sections are in scaled form: :math:`(\beta^2/Z^2) E_k
        (d\sigma/dE_k)`, where :math:`E_k` is the energy of the emitted photon.
        A negative number of electrons in a subshell indicates conduction
        electrons.
    compton_profiles : dict
        Dictionary of Compton profile data with keys 'num_electrons' (number of
        electrons in each subshell), 'binding_energy' (ionization potential of
        each subshell), and 'J' (Hartree-Fock Compton profile as a function of
        the projection of the electron momentum on the scattering vector,
        :math:`p_z` for each subshell). Note that subshell occupancies may not
        match the atomic relaxation data.
    reactions : collections.OrderedDict
        Contains the cross sections for each photon reaction. The keys are MT
        values and the values are instances of :class:`PhotonReaction`.

    """

    def __init__(self, atomic_number):
        self.atomic_number = atomic_number
        self._atomic_relaxation = None
        self.reactions = OrderedDict()
        self.compton_profiles = {}
        self.bremsstrahlung = {}

    def __contains__(self, mt):
        return mt in self.reactions

    def __getitem__(self, mt):
        if mt in self.reactions:
            return self.reactions[mt]
        else:
            raise KeyError('No reaction with MT={}.'.format(mt))

    def __repr__(self):
        return "<IncidentPhoton: {}>".format(self.name)

    def __iter__(self):
        return iter(self.reactions.values())

    @property
    def atomic_number(self):
        return self._atomic_number

    @property
    def atomic_relaxation(self):
        return self._atomic_relaxation

    @property
    def name(self):
        # Element symbol, e.g. 'C' for Z=6
        return ATOMIC_SYMBOL[self.atomic_number]

    @atomic_number.setter
    def atomic_number(self, atomic_number):
        cv.check_type('atomic number', atomic_number, Integral)
        cv.check_greater_than('atomic number', atomic_number, 0, True)
        self._atomic_number = atomic_number

    @atomic_relaxation.setter
    def atomic_relaxation(self, atomic_relaxation):
        cv.check_type('atomic relaxation data', atomic_relaxation,
                      AtomicRelaxation)
        self._atomic_relaxation = atomic_relaxation

    @classmethod
    def from_ace(cls, ace_or_filename):
        """Generate incident photon data from an ACE table

        Parameters
        ----------
        ace_or_filename : str or openmc.data.ace.Table
            ACE table to read from. If given as a string, it is assumed to be
            the filename for the ACE file.

        Returns
        -------
        openmc.data.IncidentPhoton
            Photon interaction data

        """
        # First obtain the data for the first provided ACE table/file
        if isinstance(ace_or_filename, Table):
            ace = ace_or_filename
        else:
            ace = get_table(ace_or_filename)

        # Get atomic number based on name of ACE table
        zaid = ace.name.split('.')[0]
        Z = get_metadata(int(zaid))[2]

        # Read each reaction
        data = cls(Z)
        for mt in (502, 504, 515, 522):
            data.reactions[mt] = PhotonReaction.from_ace(ace, mt)

        # Compton profiles
        n_shell = ace.nxs[5]
        if n_shell != 0:
            # Get number of electrons in each shell
            idx = ace.jxs[6]
            data.compton_profiles['num_electrons'] = ace.xss[idx : idx+n_shell]

            # Get binding energy for each shell (stored in MeV in ACE)
            idx = ace.jxs[7]
            e = ace.xss[idx : idx+n_shell]*EV_PER_MEV
            data.compton_profiles['binding_energy'] = e

            # Create Compton profile for each electron shell
            profiles = []
            for k in range(n_shell):
                # Get number of momentum values and interpolation scheme
                loca = int(ace.xss[ace.jxs[9] + k])
                jj = int(ace.xss[ace.jxs[10] + loca - 1])
                m = int(ace.xss[ace.jxs[10] + loca])

                # Read momentum and PDF
                idx = ace.jxs[10] + loca + 1
                pz = ace.xss[idx : idx+m]
                pdf = ace.xss[idx+m : idx+2*m]

                # Create profile function
                J_k = Tabulated1D(pz, pdf, [m], [jj])
                profiles.append(J_k)
            data.compton_profiles['J'] = profiles

        # Subshell photoelectric xs and atomic relaxation data
        if ace.nxs[7] > 0:
            data.atomic_relaxation = AtomicRelaxation.from_ace(ace)

            # Get subshell designators
            n_subshells = ace.nxs[7]
            idx = ace.jxs[11]
            designators = [int(i) for i in ace.xss[idx : idx+n_subshells]]

            # Get energy grid for subshell photoionization (stored as
            # logarithms of MeV)
            n_energy = ace.nxs[3]
            idx = ace.jxs[1]
            energy = np.exp(ace.xss[idx : idx+n_energy])*EV_PER_MEV

            # Get cross section for each subshell
            idx = ace.jxs[16]
            for d in designators:
                # Create photon reaction; MT=534 corresponds to designator 1
                mt = 533 + d
                rx = PhotonReaction(mt)
                data.reactions[mt] = rx

                # Store cross section; zeros are kept as-is since the values
                # are stored as logarithms
                xs = ace.xss[idx : idx+n_energy].copy()
                nonzero = (xs != 0.0)
                xs[nonzero] = np.exp(xs[nonzero])
                rx.xs = Tabulated1D(energy, xs, [n_energy], [5])
                idx += n_energy

                # Copy binding energy
                shell = _SUBSHELLS[d]
                e = data.atomic_relaxation.binding_energy[shell]
                rx.subshell_binding_energy = e

        # Add bremsstrahlung DCS data
        data._add_bremsstrahlung()

        return data

    @classmethod
    def from_endf(cls, photoatomic, relaxation=None):
        """Generate incident photon data from an ENDF evaluation

        Parameters
        ----------
        photoatomic : str or openmc.data.endf.Evaluation
            ENDF photoatomic data evaluation to read from. If given as a
            string, it is assumed to be the filename for the ENDF file.
        relaxation : str or openmc.data.endf.Evaluation, optional
            ENDF atomic relaxation data evaluation to read from. If given as a
            string, it is assumed to be the filename for the ENDF file.

        Returns
        -------
        openmc.data.IncidentPhoton
            Photon interaction data

        """
        if isinstance(photoatomic, Evaluation):
            ev = photoatomic
        else:
            ev = Evaluation(photoatomic)

        Z = ev.target['atomic_number']
        data = cls(Z)

        # Read each reaction (photoatomic cross sections are MF=23)
        for mf, mt, nc, mod in ev.reaction_list:
            if mf == 23:
                data.reactions[mt] = PhotonReaction.from_endf(ev, mt)

        # Add atomic relaxation data if it hasn't been added already
        if relaxation is not None:
            data.atomic_relaxation = AtomicRelaxation.from_endf(relaxation)

        # If Compton profile data hasn't been loaded, do so (populates the
        # module-level cache once for all elements)
        if not _COMPTON_PROFILES:
            filename = os.path.join(os.path.dirname(__file__), 'compton_profiles.h5')
            with h5py.File(filename, 'r') as f:
                _COMPTON_PROFILES['pz'] = f['pz'][()]
                for i in range(1, 101):
                    group = f['{:03}'.format(i)]
                    num_electrons = group['num_electrons'][()]
                    binding_energy = group['binding_energy'][()]*EV_PER_MEV
                    J = group['J'][()]
                    _COMPTON_PROFILES[i] = {'num_electrons': num_electrons,
                                            'binding_energy': binding_energy,
                                            'J': J}

        # Add Compton profile data
        pz = _COMPTON_PROFILES['pz']
        profile = _COMPTON_PROFILES[Z]
        data.compton_profiles['num_electrons'] = profile['num_electrons']
        data.compton_profiles['binding_energy'] = profile['binding_energy']
        data.compton_profiles['J'] = [Tabulated1D(pz, J_k) for J_k in profile['J']]

        # Add bremsstrahlung DCS data
        data._add_bremsstrahlung()

        return data

    @classmethod
    def from_hdf5(cls, group_or_filename):
        """Generate photon reaction from an HDF5 group

        Parameters
        ----------
        group_or_filename : h5py.Group or str
            HDF5 group containing interaction data. If given as a string, it is
            assumed to be the filename for the HDF5 file, and the first group is
            used to read from.

        Returns
        -------
        openmc.data.IncidentPhoton
            Photon interaction data

        """
        if isinstance(group_or_filename, h5py.Group):
            group = group_or_filename
        else:
            # NOTE(review): the file handle is intentionally left open here
            # since `group` (and datasets read from it below) reference it --
            # confirm whether the handle should be retained on the instance.
            h5file = h5py.File(str(group_or_filename), 'r')

            # Make sure version matches
            if 'version' in h5file.attrs:
                major, minor = h5file.attrs['version']
                # For now all versions of HDF5 data can be read
            else:
                # BUG FIX: this previously referenced HDF5_VERSION_MAJOR,
                # which is not imported in this module, so this error path
                # raised NameError instead of the intended IOError. Use the
                # major component of the imported HDF5_VERSION instead.
                raise IOError(
                    'HDF5 data does not indicate a version. Your installation '
                    'of the OpenMC Python API expects version {}.x data.'
                    .format(HDF5_VERSION[0]))

            group = list(h5file.values())[0]

        Z = group.attrs['Z']
        data = cls(Z)

        # Read energy grid
        energy = group['energy'][()]

        # Read cross section data for each known reaction; subshell
        # photoelectric reactions live under the 'subshells' group
        for mt, (name, key) in _REACTION_NAME.items():
            if key in group:
                rgroup = group[key]
            elif key in group['subshells']:
                rgroup = group['subshells'][key]
            else:
                continue

            data.reactions[mt] = PhotonReaction.from_hdf5(rgroup, mt, energy)

        # Check for necessary reactions
        for mt in (502, 504, 522):
            assert mt in data, "Reaction {} not found".format(mt)

        # Read atomic relaxation
        data.atomic_relaxation = AtomicRelaxation.from_hdf5(group['subshells'])

        # Read Compton profiles
        if 'compton_profiles' in group:
            rgroup = group['compton_profiles']
            profile = data.compton_profiles
            profile['num_electrons'] = rgroup['num_electrons'][()]
            profile['binding_energy'] = rgroup['binding_energy'][()]

            # Get electron momentum values
            pz = rgroup['pz'][()]
            J = rgroup['J'][()]
            if pz.size != J.shape[1]:
                raise ValueError("'J' array shape is not consistent with the "
                                 "'pz' array shape")
            profile['J'] = [Tabulated1D(pz, Jk) for Jk in J]

        # Read bremsstrahlung
        if 'bremsstrahlung' in group:
            rgroup = group['bremsstrahlung']
            data.bremsstrahlung['I'] = rgroup.attrs['I']
            for key in ('dcs', 'electron_energy', 'ionization_energy',
                        'num_electrons', 'photon_energy'):
                data.bremsstrahlung[key] = rgroup[key][()]

        return data

    def export_to_hdf5(self, path, mode='a', libver='earliest'):
        """Export incident photon data to an HDF5 file.

        Parameters
        ----------
        path : str
            Path to write HDF5 file to
        mode : {'r', r+', 'w', 'x', 'a'}
            Mode that is used to open the HDF5 file. This is the second argument
            to the :class:`h5py.File` constructor.
        libver : {'earliest', 'latest'}
            Compatibility mode for the HDF5 file. 'latest' will produce files
            that are less backwards compatible but have performance benefits.

        """
        # Open file and write version. Using a context manager guarantees the
        # file handle is closed even if one of the writes below raises
        # (previously the handle was leaked).
        with h5py.File(str(path), mode, libver=libver) as f:
            f.attrs['filetype'] = np.string_('data_photon')
            if 'version' not in f.attrs:
                f.attrs['version'] = np.array(HDF5_VERSION)

            group = f.create_group(self.name)
            group.attrs['Z'] = Z = self.atomic_number

            # Determine union energy grid over all reactions
            union_grid = np.array([])
            for rx in self:
                union_grid = np.union1d(union_grid, rx.xs.x)
            group.create_dataset('energy', data=union_grid)

            # Write cross sections
            shell_group = group.create_group('subshells')
            designators = []
            for mt, rx in self.reactions.items():
                name, key = _REACTION_NAME[mt]
                if mt in (502, 504, 515, 517, 522):
                    sub_group = group.create_group(key)
                elif 534 <= mt <= 572:
                    # Subshell photoelectric reaction
                    designators.append(key)
                    sub_group = shell_group.create_group(key)

                    # Write atomic relaxation for this subshell alongside it
                    if key in self.atomic_relaxation.subshells:
                        self.atomic_relaxation.to_hdf5(sub_group, key)
                else:
                    # Reactions not written to the HDF5 format are skipped
                    continue

                rx.to_hdf5(sub_group, union_grid, Z)

            shell_group.attrs['designators'] = np.array(designators, dtype='S')

            # Write Compton profiles
            if self.compton_profiles:
                compton_group = group.create_group('compton_profiles')

                profile = self.compton_profiles
                compton_group.create_dataset('num_electrons',
                                             data=profile['num_electrons'])
                compton_group.create_dataset('binding_energy',
                                             data=profile['binding_energy'])

                # Get electron momentum values (shared x grid of all profiles)
                compton_group.create_dataset('pz', data=profile['J'][0].x)

                # Create/write 2D array of profiles
                J = np.array([Jk.y for Jk in profile['J']])
                compton_group.create_dataset('J', data=J)

            # Write bremsstrahlung
            if self.bremsstrahlung:
                brem_group = group.create_group('bremsstrahlung')
                for key, value in self.bremsstrahlung.items():
                    if key == 'I':
                        brem_group.attrs[key] = value
                    else:
                        brem_group.create_dataset(key, data=value)

    def _add_bremsstrahlung(self):
        """Add the data used in the thick-target bremsstrahlung approximation

        """
        # Load bremsstrahlung data into the module-level cache if it has not
        # yet been loaded
        if not _BREMSSTRAHLUNG:
            # Add data used for density effect correction
            filename = os.path.join(os.path.dirname(__file__), 'density_effect.h5')
            with h5py.File(filename, 'r') as f:
                for i in range(1, 101):
                    group = f['{:03}'.format(i)]
                    _BREMSSTRAHLUNG[i] = {
                        'I': group.attrs['I'],
                        'num_electrons': group['num_electrons'][()],
                        'ionization_energy': group['ionization_energy'][()]
                    }

            filename = os.path.join(os.path.dirname(__file__), 'BREMX.DAT')
            # Close the data file deterministically (previously the handle
            # from a bare open() was leaked)
            with open(filename, 'r') as fh:
                brem = fh.read().split()

            # Incident electron kinetic energy grid in eV
            _BREMSSTRAHLUNG['electron_energy'] = np.logspace(3, 9, 200)
            log_energy = np.log(_BREMSSTRAHLUNG['electron_energy'])

            # Get number of tabulated electron and photon energy values
            n = int(brem[37])
            k = int(brem[38])

            # Index in data
            p = 39

            # Get log of incident electron kinetic energy values, used for
            # cubic spline interpolation in log energy. Units are in MeV, so
            # convert to eV.
            logx = np.log(np.fromiter(brem[p:p+n], float, n)*EV_PER_MEV)
            p += n

            # Get reduced photon energy values
            _BREMSSTRAHLUNG['photon_energy'] = np.fromiter(brem[p:p+k], float, k)
            p += k

            for i in range(1, 101):
                dcs = np.empty([len(log_energy), k])

                # Get the scaled cross section values for each electron energy
                # and reduced photon energy for this Z. Units are in mb, so
                # convert to b.
                y = np.reshape(np.fromiter(brem[p:p+n*k], float, n*k), (n, k))*1.0e-3
                p += k*n

                for j in range(k):
                    # Cubic spline interpolation in log energy and linear DCS
                    cs = CubicSpline(logx, y[:,j])

                    # Get scaled DCS values (millibarns) on new energy grid
                    dcs[:,j] = cs(log_energy)

                _BREMSSTRAHLUNG[i]['dcs'] = dcs

        # Add bremsstrahlung DCS data for this element
        self.bremsstrahlung['electron_energy'] = _BREMSSTRAHLUNG['electron_energy']
        self.bremsstrahlung['photon_energy'] = _BREMSSTRAHLUNG['photon_energy']
        self.bremsstrahlung.update(_BREMSSTRAHLUNG[self.atomic_number])
class PhotonReaction(EqualityMixin):
    """Photon-induced reaction

    Parameters
    ----------
    mt : int
        The ENDF MT number for this reaction.

    Attributes
    ----------
    anomalous_real : openmc.data.Tabulated1D
        Real part of the anomalous scattering factor
    anomalous_imag : openmc.data.Tabulated1D
        Imaginary part of the anomalous scattering factor
    mt : int
        The ENDF MT number for this reaction.
    scattering_factor : openmc.data.Tabulated1D
        Coherent or incoherent form factor.
    xs : Callable
        Cross section as a function of incident photon energy

    """

    def __init__(self, mt):
        self.mt = mt
        # Optional data; populated via the property setters below
        self._xs = None
        self._scattering_factor = None
        self._anomalous_real = None
        self._anomalous_imag = None

    def __repr__(self):
        # Include the human-readable reaction name when the MT is known
        if self.mt in _REACTION_NAME:
            return "<Photon Reaction: MT={} {}>".format(
                self.mt, _REACTION_NAME[self.mt][0])
        else:
            return "<Photon Reaction: MT={}>".format(self.mt)

    @property
    def anomalous_real(self):
        return self._anomalous_real

    @property
    def anomalous_imag(self):
        return self._anomalous_imag

    @property
    def scattering_factor(self):
        return self._scattering_factor

    @property
    def xs(self):
        return self._xs

    @anomalous_real.setter
    def anomalous_real(self, anomalous_real):
        cv.check_type('real part of anomalous scattering factor',
                      anomalous_real, Callable)
        self._anomalous_real = anomalous_real

    @anomalous_imag.setter
    def anomalous_imag(self, anomalous_imag):
        cv.check_type('imaginary part of anomalous scattering factor',
                      anomalous_imag, Callable)
        self._anomalous_imag = anomalous_imag

    @scattering_factor.setter
    def scattering_factor(self, scattering_factor):
        cv.check_type('scattering factor', scattering_factor, Callable)
        self._scattering_factor = scattering_factor

    @xs.setter
    def xs(self, xs):
        cv.check_type('reaction cross section', xs, Callable)
        self._xs = xs
@classmethod
def from_ace(cls, ace, mt):
"""Generate photon reaction from an ACE table
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Get energy grid (stored as logarithms)
n = ace.nxs[3]
idx = ace.jxs[1]
energy = np.exp(ace.xss[idx : idx+n])*EV_PER_MEV
# Get index for appropriate reaction
if mt == 502:
# Coherent scattering
idx = ace.jxs[1] + 2*n
elif mt == 504:
# Incoherent scattering
idx = ace.jxs[1] + n
elif mt == 515:
# Pair production
idx = ace.jxs[1] + 4*n
elif mt == 522:
# Photoelectric
idx = ace.jxs[1] + 3*n
else:
raise ValueError('ACE photoatomic cross sections do not have '
'data for MT={}.'.format(mt))
# Store cross section
xs = ace.xss[idx : idx+n].copy()
nonzero = (xs != 0.0)
xs[nonzero] = np.exp(xs[nonzero])
rx.xs = Tabulated1D(energy, xs, [n], [5])
# Get form factors for incoherent/coherent scattering
new_format = (ace.nxs[6] > 0)
if mt == 502:
idx = ace.jxs[3]
if new_format:
n = (ace.jxs[4] - ace.jxs[3]) // 3
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.08, 0.1, 0.12,
0.15, 0.18, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,
0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4,
3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.2, 5.4, 5.6,
5.8, 6.0])
n = x.size
ff = ace.xss[idx+n : idx+2*n]
rx.scattering_factor = Tabulated1D(x, ff)
elif mt == 504:
idx = ace.jxs[2]
if new_format:
n = (ace.jxs[3] - ace.jxs[2]) // 2
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.005, 0.01, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6,
0.7, 0.8, 0.9, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 8.0
])
n = x.size
ff = ace.xss[idx : idx+n]
rx.scattering_factor = Tabulated1D(x, ff)
return rx
@classmethod
def from_endf(cls, ev, mt):
"""Generate photon reaction from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF photo-atomic interaction data evaluation
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
rx = cls(mt)
# Read photon cross section
if (23, mt) in ev.section:
file_obj = StringIO(ev.section[23, mt])
get_head_record(file_obj)
params, rx.xs = get_tab1_record(file_obj)
# Set subshell binding energy and/or fluorescence yield
if mt >= 534 and mt <= 599:
rx.subshell_binding_energy = params[0]
if mt >= 534 and mt <= 572:
rx.fluorescence_yield = params[1]
# Read form factors / scattering functions
if (27, mt) in ev.section:
file_obj = StringIO(ev.section[27, mt])
get_head_record(file_obj)
params, rx.scattering_factor = get_tab1_record(file_obj)
# Check for anomalous scattering factor
if mt == 502:
if (27, 506) in ev.section:
file_obj = StringIO(ev.section[27, 506])
get_head_record(file_obj)
params, rx.anomalous_real = get_tab1_record(file_obj)
if (27, 505) in ev.section:
file_obj = StringIO(ev.section[27, 505])
get_head_record(file_obj)
params, rx.anomalous_imag = get_tab1_record(file_obj)
return rx
@classmethod
def from_hdf5(cls, group, mt, energy):
"""Generate photon reaction from an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to read from
mt : int
The MT value of the reaction to get data for
energy : Iterable of float
arrays of energies at which cross sections are tabulated at
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Cross sections
xs = group['xs'][()]
# Replace zero elements to small non-zero to enable log-log
xs[xs == 0.0] = np.exp(-500.0)
# Threshold
threshold_idx = 0
if 'threshold_idx' in group['xs'].attrs:
threshold_idx = group['xs'].attrs['threshold_idx']
# Store cross section
rx.xs = Tabulated1D(energy[threshold_idx:], xs, [len(xs)], [5])
# Check for anomalous scattering factor
if 'anomalous_real' in group:
rx.anomalous_real = Tabulated1D.from_hdf5(group['anomalous_real'])
if 'anomalous_imag' in group:
rx.anomalous_imag = Tabulated1D.from_hdf5(group['anomalous_imag'])
# Check for factors / scattering functions
if 'scattering_factor' in group:
rx.scattering_factor = Tabulated1D.from_hdf5(group['scattering_factor'])
return rx
def to_hdf5(self, group, energy, Z):
"""Write photon reaction to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
energy : Iterable of float
arrays of energies at which cross sections are tabulated at
Z : int
atomic number
"""
# Write cross sections
if self.mt >= 534 and self.mt <= 572:
# Determine threshold
threshold = self.xs.x[0]
idx = np.searchsorted(energy, threshold, side='right') - 1
# Interpolate cross section onto union grid and write
photoionization = self.xs(energy[idx:])
group.create_dataset('xs', data=photoionization)
assert len(energy) == len(photoionization) + idx
group['xs'].attrs['threshold_idx'] = idx
else:
group.create_dataset('xs', data=self.xs(energy))
# Write scattering factor
if self.scattering_factor is not None:
if self.mt == 502:
# Create integrated form factor
ff = deepcopy(self.scattering_factor)
ff.x *= ff.x
ff.y *= ff.y/Z**2
int_ff = Tabulated1D(ff.x, ff.integral())
int_ff.to_hdf5(group, 'integrated_scattering_factor')
self.scattering_factor.to_hdf5(group, 'scattering_factor')
if self.anomalous_real is not None:
self.anomalous_real.to_hdf5(group, 'anomalous_real')
if self.anomalous_imag is not None:
self.anomalous_imag.to_hdf5(group, 'anomalous_imag')
| mit |
ankurankan/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware than the time complexity
is polynomial.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
# Problem size: tune these to trade runtime for richer topics.
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups corpus and vectorize it. Headers, footers and quoted
# replies are stripped; English stop words, hapaxes (min_df=2) and terms in
# >=95% of documents (max_df=0.95) are filtered out as uninformative.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
                             remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
                             stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Factorize the TF-IDF matrix into an additive topic model.
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
# Report the highest-weighted terms of each NMF component as a "topic".
terms = vectorizer.get_feature_names()
for idx, weights in enumerate(nmf.components_):
    top = weights.argsort()[:-n_top_words - 1:-1]
    print("Topic #%d:" % idx)
    print(" ".join(terms[i] for i in top))
    print()
| bsd-3-clause |
glauffer/Conditional-Entropy | old_CE.py | 1 | 2699 | ################################################################################
# #
# Program to calculate the Conditional Entropy for a single pulsating star #
# This program calculates and saves a file with periods and entropies #
# To run: type in terminal -> python3 CE.py #
# #
# To change the file: change the .dat file in the "#Load the data" section #
# To change the period range: change the values in the #
# "#Creates the period array" section #
# The standard precision is 0.0001 but for some stars we need a finner #
# precision, 0.00001 is usualy enough. *precision is the period step! #
# #
################################################################################
import numpy
from periodogram import find_period, rephase, get_phase
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def hc(bins, row, col, size):
    """Conditional entropy of a phase-folded light curve.

    Parameters
    ----------
    bins : 2-D array
        Joint histogram counts; axis 0 is phase, axis 1 is magnitude.
    row, col : iterable of numbers
        Bin indices to sum over. Values are cast to int, so the float
        arrays produced by ``numpy.linspace`` in the driver script keep
        working (float indexing is an error in modern NumPy).
    size : int
        Total number of observations (normalization constant).

    Returns
    -------
    float
        Sum of p(i,j) * log(p(i)/p(i,j)) over occupied bins, or +inf
        when ``size`` is not positive (no data).
    """
    # numpy.PINF was removed in NumPy 2.0; numpy.inf is the same value.
    if size <= 0:
        return numpy.inf
    total = 0.0
    for i in row:
        i = int(i)
        # Marginal count of phase bin i, hoisted out of the inner loop.
        row_sum = numpy.sum(bins[i, :])
        for j in col:
            p = bins[i][int(j)] / size
            if p > 0:
                total += p * numpy.log(row_sum / size / p)
    return total
# Load the light-curve data (time, magnitude, ...) as a masked float array.
data = numpy.ma.array(data=numpy.loadtxt('/path/to/star/file'),
                      mask=None, dtype=float)
# Trial periods to scan. Step 1e-5 days; some stars need this finer grid.
periods = numpy.arange(0.1, 1.00001, 0.00001)
# Histogram geometry for the entropy computation (keep these fixed).
xbins = 10
ybins = 5
row = numpy.linspace(0, xbins-1, xbins)
col = numpy.linspace(0, ybins-1, ybins)
# Compute the conditional entropy of the folded curve at each trial period.
entropy = []
for trial_period in periods:
    folded = rephase(data, trial_period, 0)
    # Rescale magnitudes to [0, 1] so they fit the fixed histogram range.
    mags = folded.T[1]
    lo, hi = numpy.min(mags), numpy.max(mags)
    folded.T[1] = (mags - lo) / (hi - lo)
    bins, binsX, binsY = numpy.histogram2d(folded.T[0], folded.T[1],
                                           [xbins, ybins], [[0, 1], [0, 1]])
    entropy.append(hc(bins, row, col, len(folded.T[1])))
# Optionally persist the scan:
# numpy.savetxt('period_entropy.dat',
#               numpy.dstack((periods, entropy))[0], fmt='%s')
# The best period is the one that minimizes the conditional entropy.
minimum = numpy.argmin(entropy)
e_min = entropy[minimum]
p_min = periods[minimum]
print(p_min)
# print('\n', p_min, e_min)
# Optional diagnostic plot of entropy vs. period:
# fig = plt.figure()
# plt.plot(periods, entropy, 'r')
# plt.plot(periods, entropy, 'k+', markersize=12)
# fig.suptitle('OGLE-LMC-CEP-0010')
# plt.xlabel('Periods')
# plt.ylabel('Entropy')
# fig.savefig('0010_test11.png')
| gpl-2.0 |
michaelbramwell/sms-tools | lectures/04-STFT/plots-code/time-freq-compromise.py | 19 | 1255 | import numpy as np
import time, os, sys
# Make the sms-tools model code importable relative to this lecture script.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
# Demonstrate the STFT time/frequency resolution trade-off on the same sound:
# a short window (good time resolution) vs a long one (good frequency resolution).
(fs, x) = UF.wavread('../../../sounds/piano.wav')
plt.figure(1, figsize=(9.5, 6))
# Short analysis window: M = N = 256 samples, hop H = 128.
w = np.hamming(256)
N = 256
H = 128
mX1, pX1 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(211)
numFrames = int(mX1[:,0].size)
# Convert frame index -> seconds and bin index -> Hz for the plot axes.
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX1[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX1))
plt.title('mX (piano.wav), M=256, N=256, H=128')
plt.autoscale(tight=True)
# Long analysis window: M = N = 1024 samples, same hop size.
w = np.hamming(1024)
N = 1024
H = 128
mX2, pX2 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(212)
numFrames = int(mX2[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX2[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2))
plt.title('mX (piano.wav), M=1024, N=1024, H=128')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('time-freq-compromise.png')
plt.show()
| agpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-inference-schema/package.py | 3 | 1603 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyInferenceSchema(Package):
    """This package is intended to provide a uniform schema for common machine
    learning applications, as well as a set of decorators that can be used to
    aid in web based ML prediction applications."""
    homepage = "https://pypi.org/project/inference-schema/"
    # Installed from the prebuilt wheel, hence expand=False below.
    url = "https://pypi.io/packages/py3/i/inference-schema/inference_schema-1.0.2-py3-none-any.whl"
    version('1.0.2', sha256='fd379becbd12dcf9f7a1ad5c03b163d501ef1dcc4fb85204735c84b1d139f478', expand=False)
    # Optional feature sets mirroring the project's extras.
    variant('numpy', default=False, description='Enable numpy support')
    variant('pandas', default=False, description='Enable pandas support')
    variant('pyspark', default=False, description='Enable pyspark support')
    extends('python')
    depends_on('python@3:', type=('build', 'run'))
    depends_on('py-pip', type='build')
    depends_on('py-python-dateutil@2.5.3:', type=('build', 'run'))
    depends_on('py-pytz@2017.2:', type=('build', 'run'))
    # NOTE(review): wrapt and pyspark are pinned to exact versions, matching
    # upstream's requirements at this release -- verify before relaxing.
    depends_on('py-wrapt@1.11.1', type=('build', 'run'))
    depends_on('py-numpy@1.13.0:', when='+numpy', type=('build', 'run'))
    depends_on('py-pandas@0.20.2:', when='+pandas', type=('build', 'run'))
    depends_on('py-pyspark@2.3.2', when='+pyspark', type=('build', 'run'))
    def install(self, spec, prefix):
        # Wheel install: hand the downloaded archive straight to pip.
        pip = which('pip')
        pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
| lgpl-2.1 |
gcarq/freqtrade | tests/strategy/test_strategy_helpers.py | 1 | 3034 | import numpy as np
import pandas as pd
from freqtrade.strategy import merge_informative_pair, timeframe_to_minutes
def generate_test_data(timeframe: str, size: int):
    """Build a deterministic OHLCV dataframe of `size` candles for tests.

    The RNG is re-seeded on every call, so repeated calls with the same
    arguments produce identical frames.
    """
    np.random.seed(42)
    minutes = timeframe_to_minutes(timeframe)
    opens = np.random.normal(20, 2, size=size)
    stamps = pd.period_range('2020-07-05', periods=size, freq=f'{minutes}min').to_timestamp()
    # NOTE: the draw order below (high, low, close, volume) must stay fixed
    # to keep the seeded values reproducible.
    frame = pd.DataFrame({
        'date': stamps,
        'open': opens,
        'high': opens + np.random.normal(2, 1, size=size),
        'low': opens - np.random.normal(2, 1, size=size),
        'close': opens + np.random.normal(0, 1, size=size),
        'volume': np.random.normal(200, size=size),
    })
    return frame.dropna()
def test_merge_informative_pair():
    """Merging a 1h frame into a 15m frame keeps all base columns, adds the
    suffixed informative columns, and forward-fills with the correct lag."""
    data = generate_test_data('15m', 40)
    informative = generate_test_data('1h', 40)
    result = merge_informative_pair(data, informative, '15m', '1h', ffill=True)
    assert isinstance(result, pd.DataFrame)
    assert len(result) == len(data)
    assert 'date' in result.columns
    assert result['date'].equals(data['date'])
    assert 'date_1h' in result.columns
    for column in ('open', 'close', 'volume'):
        assert column in result.columns
        assert column + '_1h' in result.columns
        assert result[column].equals(data[column])
    # The first four 15m candles precede the first closed 1h candle.
    for idx in range(4):
        assert result.iloc[idx]['date_1h'] is pd.NaT
    # The next four candles map to the first 1h candle (0:00).
    for idx in range(4, 8):
        assert result.iloc[idx]['date_1h'] == result.iloc[0]['date']
    # Row 8 rolls over to the next hourly candle (original row 4).
    assert result.iloc[8]['date_1h'] == result.iloc[4]['date']
def test_merge_informative_pair_same():
    """Merging an informative frame of the *same* timeframe must align rows
    one-to-one, with no forward-fill lag."""
    data = generate_test_data('15m', 40)
    informative = generate_test_data('15m', 40)
    result = merge_informative_pair(data, informative, '15m', '15m', ffill=True)
    assert isinstance(result, pd.DataFrame)
    assert len(result) == len(data)
    assert 'date' in result.columns
    assert result['date'].equals(data['date'])
    assert 'date_15m' in result.columns
    for column in ('open', 'close', 'volume'):
        assert column in result.columns
        assert column + '_15m' in result.columns
        assert result[column].equals(data[column])
    # Identical timeframes: the suffixed dates match the base dates 1:1.
    assert result['date_15m'].equals(result['date'])
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/feature_selection/tests/test_rfe.py | 4 | 3061 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris
from sklearn.metrics import zero_one
from sklearn.svm import SVC
from sklearn.utils import check_random_state
def test_rfe_set_params():
    """Passing estimator_params to RFE must yield the same predictions as
    configuring the wrapped estimator directly."""
    rng = check_random_state(0)
    iris = load_iris()
    # Pad iris with 6 noise features so RFE has something to eliminate.
    features = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    target = iris.target
    direct = RFE(estimator=SVC(kernel="linear"),
                 n_features_to_select=4, step=0.1)
    pred_direct = direct.fit(features, target).predict(features)
    via_params = RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                     estimator_params={'kernel': 'linear'})
    pred_params = via_params.fit(features, target).predict(features)
    assert_array_equal(pred_direct, pred_params)
def test_rfe():
    """RFE must select the 4 informative iris features (dropping the noise
    columns) and behave identically on dense and sparse input."""
    rng = check_random_state(0)
    iris = load_iris()
    dense = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    sparse_input = sparse.csr_matrix(dense)
    target = iris.target
    # Dense path
    svc = SVC(kernel="linear")
    selector = RFE(estimator=svc, n_features_to_select=4, step=0.1)
    selector.fit(dense, target)
    reduced = selector.transform(dense)
    svc.fit(reduced, target)
    assert_equal(len(selector.ranking_), dense.shape[1])
    # Sparse path
    selector_sparse = RFE(estimator=SVC(kernel="linear"),
                          n_features_to_select=4, step=0.1)
    selector_sparse.fit(sparse_input, target)
    reduced_sparse = selector_sparse.transform(sparse_input)
    # The selected features are exactly the original iris columns.
    assert_equal(reduced.shape, iris.data.shape)
    assert_array_almost_equal(reduced[:10], iris.data[:10])
    assert_array_almost_equal(selector.predict(dense), svc.predict(iris.data))
    assert_equal(selector.score(dense, target),
                 svc.score(iris.data, iris.target))
    assert_array_almost_equal(reduced, reduced_sparse.toarray())
def test_rfecv():
    """RFECV with cross-validation recovers the informative iris features,
    works on sparse input, and accepts a custom loss function."""
    generator = check_random_state(0)
    iris = load_iris()
    # Pad iris with 6 noise features so there is something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.cv_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    # The CV selection keeps exactly the original iris columns.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    assert_array_almost_equal(X_r_sparse.toarray(), X_r)
    # Test using a customized loss function
    # NOTE(review): loss_func/zero_one are long-removed sklearn APIs; this
    # test only runs against the vintage sklearn it was written for.
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=3,
                  loss_func=zero_one)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
| unlicense |
bsipocz/bokeh | bokeh/charts/builder/tests/test_bar_builder.py | 33 | 6390 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
import pandas as pd
from bokeh.charts import Bar
from bokeh.charts.builder.tests._utils import create_chart
from bokeh.models import Range1d, FactorRange
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestBar(unittest.TestCase):
    """Unit tests for the Bar chart builder: accepted input formats and
    y-range inference/overrides."""
    def test_supported_input(self):
        # Keyed input: OrderedDict, plain dict and DataFrame must all
        # produce the same per-series builder data.
        xyvalues = OrderedDict()
        xyvalues['python'] = [2, 5]
        xyvalues['pypy'] = [12, 40]
        xyvalues['jython'] = [22, 30]
        for i, _xy in enumerate([xyvalues,
                                 dict(xyvalues),
                                 pd.DataFrame(xyvalues)]):
            bar = create_chart(Bar, _xy)
            builder = bar._builders[0]
            np.testing.assert_array_equal(builder._data['pypy'], np.array(xyvalues['pypy']))
            np.testing.assert_array_equal(builder._data['python'], np.array(xyvalues['python']))
            np.testing.assert_array_equal(builder._data['jython'], np.array(xyvalues['jython']))
            # test mid values, that should always be y/2 ..
            np.testing.assert_array_equal(builder._data['midpython'], np.array([1, 2.5]))
            np.testing.assert_array_equal(builder._data['midpypy'], np.array([6, 20]))
            np.testing.assert_array_equal(builder._data['midjython'], np.array([11, 15]))
            # stacked values should be 0 as base and + y/2 of the column
            # skipping plain dict case as stacked values randomly fails due to
            # dictionary unordered nature
            if i != 1:
                np.testing.assert_array_equal(builder._data['stackedpython'], np.array([1, 2.5]))
                np.testing.assert_array_equal(builder._data['stackedpypy'], np.array([8, 25]))
                np.testing.assert_array_equal(builder._data['stackedjython'], np.array([25, 60]))
            np.testing.assert_array_equal(builder._data['cat'], np.array(['0', '1']))
            np.testing.assert_array_equal(builder._data['width'], np.array([0.8, 0.8]))
            np.testing.assert_array_equal(builder._data['width_cat'], np.array([0.2, 0.2]))
        # Unkeyed input: list-of-lists and ndarray get index-string keys.
        lvalues = [[2, 5], [12, 40], [22, 30]]
        for i, _xy in enumerate([lvalues, np.array(lvalues)]):
            bar = create_chart(Bar, _xy)
            builder = bar._builders[0]
            np.testing.assert_array_equal(builder._data['0'], np.array(lvalues[0]))
            np.testing.assert_array_equal(builder._data['1'], np.array(lvalues[1]))
            np.testing.assert_array_equal(builder._data['2'], np.array(lvalues[2]))
            # test mid values, that should always be y/2 ..
            np.testing.assert_array_equal(builder._data['mid0'], np.array([1, 2.5]))
            np.testing.assert_array_equal(builder._data['mid1'], np.array([6, 20]))
            np.testing.assert_array_equal(builder._data['mid2'], np.array([11, 15]))
            # stacked values should be 0 as base and + y/2 of the column
            np.testing.assert_array_equal(builder._data['stacked0'], np.array([1, 2.5]))
            np.testing.assert_array_equal(builder._data['stacked1'], np.array([8, 25]))
            np.testing.assert_array_equal(builder._data['stacked2'], np.array([25, 60]))
            np.testing.assert_array_equal(builder._data['cat'], np.array(['0', '1']))
            np.testing.assert_array_equal(builder._data['width'], np.array([0.8, 0.8]))
            np.testing.assert_array_equal(builder._data['width_cat'], np.array([0.2, 0.2]))
    def test_all_positive_input(self):
        # All-positive data: the range is anchored at 0 with 10% headroom.
        source = OrderedDict()
        source['percent change 1'] = [1, 13]
        source['percent change 2'] = [12, 40]
        bar_chart = create_chart(Bar, source)
        self.assertEqual(bar_chart._builders[0].y_range.start, 0)
        self.assertEqual(bar_chart._builders[0].y_range.end, 40 * 1.1)
    def test_all_negative_input(self):
        source = OrderedDict()
        source['percent change 1'] = [-1, -13]
        source['percent change 2'] = [-12, -40]
        bar_chart = create_chart(Bar, source)
        # We want the start to be negative, so that data points downwards
        self.assertEqual(bar_chart._builders[0].y_range.start, -40 * 1.1)
        self.assertEqual(bar_chart._builders[0].y_range.end, 0)
    def test_mixed_sign_input(self):
        # Mixed signs: both extremes get 10% headroom.
        source = OrderedDict()
        source['percent change 1'] = [-1, -13]
        source['percent change 2'] = [12, 40]
        bar_chart = create_chart(Bar, source)
        self.assertEqual(bar_chart._builders[0].y_range.start, -13 * 1.1)
        self.assertEqual(bar_chart._builders[0].y_range.end, 40 * 1.1)
    def test_set_custom_continuous_range(self):
        # Users can specify their own y_range for cases where the
        # default guess is not what's desired.
        source = OrderedDict()
        source['percent change 1'] = [25, -13]
        source['percent change 2'] = [-12, -40]
        custom_y_range = Range1d(50, -50)
        bar_chart = create_chart(Bar, source, continuous_range=custom_y_range)
        self.assertEqual(bar_chart._builders[0].y_range, custom_y_range)
    def test_invalid_continuous_range_raises_error(self):
        source = OrderedDict({'p': [0, 1]})
        bad_y_range = range(0, 50)  # Not a Range object
        with self.assertRaises(ValueError):
            create_chart(Bar, source, continuous_range=bad_y_range)
    def test_non_range1d_continuous_range_raises_value_error(self):
        # A categorical FactorRange is not a valid continuous range.
        source = OrderedDict({'p': [0, 1]})
        non_1d_range = FactorRange(factors=['a', 'b'])
        with self.assertRaises(ValueError):
            create_chart(Bar, source, continuous_range=non_1d_range)
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
    """CCA Canonical Correlation Analysis.
    CCA inherits from PLS with mode="B" and deflation_mode="canonical".
    Read more in the :ref:`User Guide <cross_decomposition>`.
    Parameters
    ----------
    n_components : int, (default 2).
        number of components to keep.
    scale : boolean, (default True)
        whether to scale the data?
    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop
    tol : non-negative real, default 1e-06.
        the tolerance used in the iterative algorithm
    copy : boolean
        Whether the deflation be done on a copy. Let the default value
        to True unless you don't care about side effects
    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    x_rotations_ : array, [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.
    Notes
    -----
    For each component k, find the weights u, v that maximizes
    max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
    Note that it maximizes only the correlations between the scores.
    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.
    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score.
    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
    >>> X_c, Y_c = cca.transform(X, Y)
    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.
    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    See also
    --------
    PLSCanonical
    PLSSVD
    """
    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # CCA is entirely specified by the _PLS base: canonical deflation,
        # mode "B" (CCA inner weights), normalized Y weights, NIPALS solver.
        super(CCA, self).__init__(n_components=n_components, scale=scale,
                                  deflation_mode="canonical", mode="B",
                                  norm_y_weights=True, algorithm="nipals",
                                  max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
dilawar/moogli | moogli/visualization/plots/line.py | 2 | 9350 | from moogli import constants
from moogli.visualization.pipeline import SimulationDataConsumer
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QMenu
from PyQt4.QtGui import QAction
from PyQt4.QtGui import QGridLayout
from PyQt4.QtCore import pyqtSlot
from PyQt4 import QtCore
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy
import csv
import time
from PyQt4.QtCore import SLOT, SIGNAL
class LinePlotter(SimulationDataConsumer, QWidget):
    def __init__(self, visualizables, colors=None, parent=None,
                 figure_width=5.0, figure_height=4.0, dpi=100,
                 facecolor="", legend_alpha=0.5):
        """Build the plotting widget: Qt setup, matplotlib canvas, toolbar,
        actions, context menu, and one line per entry in `visualizables`.

        NOTE(review): the `colors` parameter is accepted but never stored or
        used anywhere in this constructor -- confirm whether it should be
        forwarded to _create_line.
        """
        QWidget.__init__(self, parent)
        SimulationDataConsumer.__init__(self)
        self.figure_width = figure_width
        self.figure_height = figure_height
        self.dpi = dpi
        # Initial toggle states; kept in sync by the toggle_*_slot methods.
        self.grid_visible = False
        self.legend_visible = True
        self.facecolor = facecolor
        self.legend_alpha = legend_alpha
        self.visualizables = visualizables
        # Setup order matters: the canvas must exist before the toolbar and
        # the actions before the context menu and signal connections.
        self._setup_prelude()
        self._setup_plot_canvas()
        self._setup_navigation_toolbar()
        self._setup_actions()
        self._setup_context_menu()
        self._setup_signal_slot_connections()
        self._setup_postlude()
"""
def lineplot(self, visids, *args, **kwargs):
axes = self.figure.add_subplot(args, kwargs)
for visid in visids:
line = axes.plot(numpy.array([]),
numpy.array([]),
label=visid,
gid=visid)[0]
self._create_legend()
return axes
def eventplot():
pass
def eventplot(self, nrows, ncols, plot_number, ids, **kwargs, ):
pass
def add_subplot(self, nrows, ncols, plot_number, plot_type,
ids, **kwargs):
axes = self.add_subplot(rows, cols, plot_number, kwargs)
if plot_type == moogli.constants.EVENT_PLOT:
axes.eventplot
elif:
return axes
def get_subplot(plot_number):
pass
"""
    def set_title(self, title):
        # Title of the single subplot created in _setup_plot_canvas.
        self.axes.set_title(title)
    def get_field(self):
        # NOTE(review): self.field is never assigned in the code visible
        # here -- presumably set by a SimulationDataConsumer base or caller.
        return self.field
    def _create_line(self,
                     label,
                     color=constants.DEFAULT,
                     zorder=0):
        """Add an empty Line2D for `label` (also used as the gid) and refresh
        the legend. NOTE(review): `zorder` is accepted but unused, and the
        Line2D handle is not retained here."""
        line = self.axes.plot(numpy.array([]),
                              numpy.array([]),
                              label=label,
                              gid=label)[0]
        if color is not constants.DEFAULT:
            line.set_color(color)
        self._create_legend()
    def _setup_prelude(self):
        # Enable custom context menus and install the grid layout used by
        # the canvas (row 0) and the navigation toolbar (row 1).
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setLayout(QGridLayout())
    def _setup_postlude(self):
        # Create one line per visualizable id.
        # NOTE(review): under Python 3 `map` is lazy, so this would be a
        # no-op; this code appears to target Python 2 (PyQt4 era).
        map(self._create_line, self.visualizables)
    def _setup_plot_canvas(self):
        """Create the matplotlib Figure/Canvas and a single subplot."""
        self.figure = Figure((self.figure_width, self.figure_height),
                             dpi=self.dpi, facecolor='#F3EFEE')
        self.canvas = FigureCanvas(self.figure)
        self.layout().addWidget(self.canvas, 0, 0)
        self.axes = self.figure.add_subplot(1, 1, 1)
        self.legend = None
    def _create_legend(self):
        """(Re)build the draggable legend and apply the stored visibility."""
        self.legend = self.axes.legend(loc='upper right',
                                       prop={'size': 10},
                                       # bbox_to_anchor=(1.0, 0.5),
                                       fancybox=True,
                                       shadow=False,
                                       ncol=1)
        self.legend.draggable()
        self.legend.get_frame().set_alpha(self.legend_alpha)
        # Re-apply the user's last show/hide choice after rebuilding.
        if self.legend_visible:
            self.show_legend_slot()
        else:
            self.hide_legend_slot()
    def _setup_navigation_toolbar(self):
        # Standard matplotlib pan/zoom/save toolbar under the canvas.
        self.navigation_toolbar = NavigationToolbar(self.canvas, self)
        self.layout().addWidget(self.navigation_toolbar, 1, 0)
    def _setup_actions(self):
        """Create the QActions shown in the context menu. The toggle actions'
        texts are flipped by the corresponding slots."""
        self.toggle_grid_action = QAction(self)
        self.toggle_grid_action.setText("Show Grid")
        self.toggle_autoscale_action = QAction(self)
        self.toggle_autoscale_action.setText("Enable Autoscaling")
        self.toggle_axis_hold_action = QAction(self)
        self.toggle_axis_hold_action.setText("Hold Axes")
        self.toggle_legend_action = QAction(self)
        self.toggle_legend_action.setText("Hide Legend")
        self.export_to_csv_action = QAction(self)
        self.export_to_csv_action.setText("CSV")
    def _setup_signal_slot_connections(self):
        """Wire matplotlib pick events and QAction triggers to their slots."""
        self.canvas.mpl_connect('pick_event', self.pick_event_slot)
        self.toggle_grid_action.triggered.connect(self.toggle_grid_slot)
        self.toggle_autoscale_action.triggered.connect(
            self.toggle_autoscale_slot)
        self.toggle_axis_hold_action.triggered.connect(
            self.toggle_axis_hold_slot)
        self.toggle_legend_action.triggered.connect(self.toggle_legend_slot)
        self.export_to_csv_action.triggered.connect(self.export_to_csv_slot)
        # Old-style connection for the custom context menu request.
        self.connect(self, SIGNAL("customContextMenuRequested(QPoint)"),
                     self, SLOT("show_context_menu_slot(QPoint)"))
    @QtCore.pyqtSlot()
    def draw_slot(self):
        """Re-render the matplotlib canvas."""
        self.canvas.draw()
    def pick_event_slot(self, event):
        """Handle matplotlib pick events; intentionally a no-op placeholder."""
        pass
def show_grid_slot(self):
self.axes.grid(True)
self.toggle_grid_action.setText("Hide Grid")
self.draw_slot()
def hide_grid_slot(self):
self.axes.grid(False)
self.toggle_grid_action.setText("Show Grid")
self.draw_slot()
def toggle_grid_slot(self):
self.hide_grid_slot() if self.grid_visible else self.show_grid_slot()
self.grid_visible = not self.grid_visible
    def toggle_autoscale_slot(self):
        """Placeholder for the 'Enable Autoscaling' action; not implemented yet."""
        pass
    def toggle_axis_hold_slot(self):
        """Placeholder for the 'Hold Axes' action; not implemented yet."""
        pass
def show_legend_slot(self):
if self.legend is None:
return
self.legend.set_visible(True)
self.toggle_legend_action.setText("Hide Legend")
self.draw_slot()
def hide_legend_slot(self):
if self.legend is None:
return
self.legend.set_visible(False)
self.toggle_legend_action.setText("Show Legend")
self.draw_slot()
def toggle_legend_slot(self):
if self.legend_visible:
self.hide_legend_slot()
else:
self.show_legend_slot()
self.legend_visible = not self.legend_visible
def set_xlabel(self, xlabel):
self.axes.set_xlabel(xlabel)
    def set_ylabel(self, ylable):
        """Set the label shown on the plot's y axis (no redraw)."""
        # NOTE(review): parameter name 'ylable' is a typo for 'ylabel'; kept
        # as-is so keyword-argument callers are not broken.
        self.axes.set_ylabel(ylable)
    @pyqtSlot(QtCore.QPoint)
    def show_context_menu_slot(self, point):
        """Pop up the context menu at *point* (widget coordinates)."""
        # exec_ blocks until the menu is dismissed; Qt expects global coords.
        self.context_menu.exec_(self.mapToGlobal(point))
def _setup_context_menu(self):
self.context_menu = QMenu()
self.context_menu.addAction(self.toggle_grid_action)
self.context_menu.addAction(self.toggle_autoscale_action)
self.context_menu.addAction(self.toggle_axis_hold_action)
self.context_menu.addAction(self.toggle_legend_action)
export_menu = self.context_menu.addMenu("Export")
export_menu.addAction(self.export_to_csv_action)
def toggle_line_visibility_slot(self, line):
line.set_visible(not line.get_visible())
@QtCore.pyqtSlot(str)
def export_to_csv_slot(self, filepath):
with open(filepath,"wb") as csv_file:
writer = csv.writer( csv_file
, delimiter = ' '
, quotechar = '"'
, fieldnames = ["Time"] + self._tables.keys()
, quoting = csv.QUOTE_NONNUMERIC
)
writer.writerow(["#MODEL : {model}".format(model = self.instance["model"])])
writer.writerow(["#TIMESTAMP : {timestamp}".format(timestamp = int(time.time()))])
writer.writerow(["#MOOSE VERSION : {moose_version}".format(moose_version = "123")])
writer.writerow(["#GOOSE VERSION : {goose_version}".format(goose_version = "456")])
writer.writeheader()
units = ["s"] + [self._unit] * len(self._tables)
writer.writerow(units)
data = [ value.get_ydata() for value in self._tables.values() ]
data = data + data[0].get_xdata()
data = transpose(data)
writer.writerows(data)
def consume(self):
self.axes.set_xlim([self.clock.begin, self.clock.end])
for line in self.axes.lines:
for mediator in self.mediators:
try:
print "gid", line.get_gid()
yvalue = mediator.output[line.get_gid()]
ydata = numpy.append(line.get_ydata(),
yvalue)
print("y_data", ydata)
line.set_ydata(ydata)
line.set_xdata(numpy.linspace(self.clock.begin, self.clock.now(), len(ydata)))
break
except:
continue
self.draw_slot()
| gpl-2.0 |
ThomasSweijen/yadesolute2 | py/post2d.py | 4 | 13849 | # encoding: utf-8
# 2009 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Module for 2d postprocessing, containing classes to project points from 3d to 2d in various ways,
providing basic but flexible framework for extracting arbitrary scalar values from bodies/interactions
and plotting the results. There are 2 basic components: flatteners and extractors.
The algorithms operate on bodies (default) or interactions, depending on the ``intr`` parameter
of post2d.data.
Flatteners
==========
Instance of classes that convert 3d (model) coordinates to 2d (plot) coordinates. Their interface is
defined by the :yref:`yade.post2d.Flatten` class (``__call__``, ``planar``, ``normal``).
Extractors
==========
Callable objects returning scalar or vector value, given a body/interaction object.
If a 3d vector is returned, Flattener.planar is called, which should return only in-plane
components of the vector.
Example
=======
This example can be found in examples/concrete/uniax-post.py ::
from yade import post2d
import pylab # the matlab-like interface of matplotlib
O.load('/tmp/uniax-tension.xml.bz2')
# flattener that project to the xz plane
flattener=post2d.AxisFlatten(useRef=False,axis=1)
# return scalar given a Body instance
extractDmg=lambda b: b.state.normDmg
# will call flattener.planar implicitly
# the same as: extractVelocity=lambda b: flattener.planar(b,b.state.vel)
extractVelocity=lambda b: b.state.vel
# create new figure
pylab.figure()
# plot raw damage
post2d.plot(post2d.data(extractDmg,flattener))
# plot smooth damage into new figure
pylab.figure(); ax,map=post2d.plot(post2d.data(extractDmg,flattener,stDev=2e-3))
# show color scale
pylab.colorbar(map,orientation='horizontal')
# raw velocity (vector field) plot
pylab.figure(); post2d.plot(post2d.data(extractVelocity,flattener))
# smooth velocity plot; data are sampled at regular grid
pylab.figure(); ax,map=post2d.plot(post2d.data(extractVelocity,flattener,stDev=1e-3))
# save last (current) figure to file
pylab.gcf().savefig('/tmp/foo.png')
# show the figures
pylab.show()
"""
from yade.wrapper import *
try:
from minieigen import *
except ImportError:
from miniEigen import *
class Flatten:
    """Abstract class for converting 3d point into 2d. Used by post2d.data2d.

    Subclasses must implement ``__call__``, ``planar`` and ``normal``.
    """
    def __init__(self): pass
    def __call__(self,b):
        "Given a :yref:`Body` / :yref:`Interaction` instance, should return either 2d coordinates as a 2-tuple, or None if the Body should be discarded."
        pass
    def planar(self,pos,vec):
        "Given position and vector value, project the vector value to the flat plane and return its 2 in-plane components."
    def normal(self,pos,vec):
        "Given position and vector value, return length of the vector normal to the flat plane."
class HelixFlatten(Flatten):
    """Class converting 3d point to 2d based on projection from helix.
    The y-axis in the projection corresponds to the rotation axis"""
    def __init__(self,useRef,thetaRange,dH_dTheta,axis=2,periodStart=0):
        """
        :param bool useRef: use reference positions rather than actual positions
        :param (θmin,θmax) thetaRange: bodies outside this range will be discarded
        :param float dH_dTheta: inclination of the spiral (per radian)
        :param {0,1,2} axis: axis of rotation of the spiral
        :param float periodStart: height of the spiral for zero angle
        """
        self.useRef,self.thetaRange,self.dH_dTheta,self.axis,self.periodStart=useRef,thetaRange,dH_dTheta,axis,periodStart
        self.ax1,self.ax2=(axis+1)%3,(axis+2)%3
    def _getPos(self,b):
        # Body position, honouring the useRef flag.
        return b.state.refPos if self.useRef else b.state.pos
    def __call__(self,b):
        import yade.utils
        # Fix: helpers/attributes were referenced without ``self.`` and raised
        # NameError as soon as this projection was actually used.
        xy,theta=yade.utils.spiralProject(self._getPos(b),self.dH_dTheta,self.axis,self.periodStart)
        if theta<self.thetaRange[0] or theta>self.thetaRange[1]: return None
        return xy
    def planar(self,b,vec):
        pos=self._getPos(b)
        pos[self.axis]=0; pos.Normalize()
        return pos.Dot(vec),vec[self.axis]
    def normal(self,pos,vec):
        # NOTE(review): despite the parameter name, ``pos`` is a body here
        # (mirroring ``planar``); the original body referenced an undefined
        # name ``b``.
        ax=Vector3(0,0,0); ax[self.axis]=1
        p=self._getPos(pos)
        circum=ax.Cross(p); circum.Normalize()
        return circum.Dot(vec)
class CylinderFlatten(Flatten):
    """Class for converting 3d point to 2d based on projection onto plane from circle.
    The y-axis in the projection corresponds to the rotation axis; the x-axis is distance form the axis.
    """
    def __init__(self,useRef,axis=2):
        """
        :param useRef: (bool) use reference positions rather than actual positions
        :param axis: axis of the cylinder, ∈{0,1,2}
        """
        if axis not in (0,1,2): raise IndexError("axis must be one of 0,1,2 (not %d)"%axis)
        self.useRef,self.axis=useRef,axis
    def _getPos(self,b):
        # Body position, honouring the useRef flag.
        return b.state.refPos if self.useRef else b.state.pos
    def __call__(self,b):
        # Fix: "import math.sqrt" raised ImportError (sqrt is not a module).
        from math import sqrt
        p=self._getPos(b)
        pp=(p[(self.axis+1)%3],p[(self.axis+2)%3])
        # Fix: pp has only 2 components; the original indexed pp[2], which is
        # out of range. Return (distance from axis, coordinate along axis).
        return sqrt(pp[0]**2+pp[1]**2),p[self.axis]
    def planar(self,b,vec):
        # Fix: _getPos and axis were referenced without ``self.``.
        pos=self._getPos(b)
        pos[self.axis]=0; pos.Normalize()
        return pos.Dot(vec),vec[self.axis]
    def normal(self,b,vec):
        pos=self._getPos(b)
        ax=Vector3(0,0,0); ax[self.axis]=1
        circum=ax.Cross(pos); circum.Normalize()
        return circum.Dot(vec)
class AxisFlatten(Flatten):
    """Project 3d points onto the plane orthogonal to one coordinate axis."""
    def __init__(self,useRef=False,axis=2):
        """
        :param bool useRef: use reference positions rather than actual positions (only meaningful when operating on Bodies)
        :param {0,1,2} axis: axis normal to the plane; the return value will be simply position with this component dropped.
        """
        if axis not in (0,1,2): raise IndexError("axis must be one of 0,1,2 (not %d)"%axis)
        self.useRef, self.axis = useRef, axis
        # Indices of the two in-plane axes.
        self.ax1 = (self.axis + 1) % 3
        self.ax2 = (self.axis + 2) % 3
    def __call__(self,b):
        if isinstance(b, Body):
            point = b.state.refPos if self.useRef else b.state.pos
        else:
            point = b.geom.contactPoint
        return (point[self.ax1], point[self.ax2])
    def planar(self,pos,vec):
        # In-plane components of vec.
        return vec[self.ax1], vec[self.ax2]
    def normal(self,pos,vec):
        # Out-of-plane component of vec.
        return vec[self.axis]
def data(extractor,flattener,intr=False,onlyDynamic=True,stDev=None,relThreshold=3.,perArea=0,div=(50,50),margin=(0,0),radius=1):
    """Filter all bodies/interactions, project them to 2d and extract required scalar value;
    return either discrete array of positions and values, or smoothed data, depending on whether the stDev
    value is specified.

    The ``intr`` parameter determines whether we operate on bodies or interactions;
    the extractor provided should expect to receive body/interaction.

    :param callable extractor: receives :yref:`Body` (or :yref:`Interaction`, if ``intr`` is ``True``) instance, should return scalar, a 2-tuple (vector fields) or None (to skip that body/interaction)
    :param callable flattener: :yref:`yade.post2d.Flatten` instance, receiving body/interaction, returns its 2d coordinates or ``None`` (to skip that body/interaction)
    :param bool intr: operate on interactions rather than bodies
    :param bool onlyDynamic: skip all non-dynamic bodies
    :param float/None stDev: standard deviation for averaging, enables smoothing; ``None`` (default) means raw mode, where discrete points are returned
    :param float relThreshold: threshold for the gaussian weight function relative to stDev (smooth mode only)
    :param int perArea: if 1, compute weightedSum/weightedArea rather than weighted average (weightedSum/sumWeights); the first is useful to compute average stress; if 2, compute averages on subdivision elements, not using weight function
    :param (int,int) div: number of cells for the gaussian grid (smooth mode only)
    :param (float,float) margin: x,y margins around bounding box for data (smooth mode only)
    :param float/callable radius: Fallback value for radius (for raw plotting) for non-spherical bodies or interactions; if a callable, receives body/interaction and returns radius
    :return: dictionary

    Returned dictionary always containing keys 'type' (one of 'rawScalar','rawVector','smoothScalar','smoothVector', depending on value of smooth and on return value from extractor), 'x', 'y', 'bbox'.

    Raw data further contains 'radii'.

    Scalar fields contain 'val' (value from *extractor*), vector fields have 'valX' and 'valY' (2 components returned by the *extractor*).
    """
    # Fix: do NOT re-import Vector3 locally from miniEigen here — the module
    # top already falls back between minieigen/miniEigen, and the local import
    # broke installations that only ship the lower-case module name.
    xx,yy,dd1,dd2,rr=[],[],[],[],[]
    nDim=0
    objects=O.interactions if intr else O.bodies
    for b in objects:
        if not intr and onlyDynamic and not b.dynamic: continue
        xy,d=flattener(b),extractor(b)
        # Fix: identity comparison instead of ``==`` (returned values may
        # overload equality, e.g. vector types).
        if xy is None or d is None: continue
        if nDim==0: nDim=1 if isinstance(d,float) else 2
        if nDim==1: dd1.append(d);
        elif len(d)==2:
            dd1.append(d[0]); dd2.append(d[1])
        elif len(d)==3:
            d1,d2=flattener.planar(b,Vector3(d))
            dd1.append(d1); dd2.append(d2)
        else:
            raise RuntimeError("Extractor must return float or 2 or 3 (not %d) floats"%nDim)
        if stDev is None: # radii are needed in the raw mode exclusively
            if not intr and isinstance(b.shape,Sphere): r=b.shape.radius
            else: r=(radius(b) if callable(radius) else radius)
            rr.append(r)
        xx.append(xy[0]); yy.append(xy[1]);
    if stDev is None:
        bbox=(min(xx),min(yy)),(max(xx),max(yy))
        if nDim==1: return {'type':'rawScalar','x':xx,'y':yy,'val':dd1,'radii':rr,'bbox':bbox}
        else: return {'type':'rawVector','x':xx,'y':yy,'valX':dd1,'valY':dd2,'radii':rr,'bbox':bbox}
    # Smooth mode: sample a gaussian-weighted average on a regular grid.
    from yade.WeightedAverage2d import GaussAverage
    import numpy
    lo,hi=(min(xx),min(yy)),(max(xx),max(yy))
    llo=lo[0]-margin[0],lo[1]-margin[1]; hhi=hi[0]+margin[0],hi[1]+margin[1]
    ga=GaussAverage(llo,hhi,div,stDev,relThreshold)
    ga2=GaussAverage(llo,hhi,div,stDev,relThreshold)
    for i in range(0,len(xx)):
        ga.add(dd1[i],(xx[i],yy[i]))
        if nDim>1: ga2.add(dd2[i],(xx[i],yy[i]))
    step=[(hhi[i]-llo[i])/float(div[i]) for i in [0,1]]
    xxx,yyy=[numpy.arange(llo[i]+.5*step[i],hhi[i],step[i]) for i in [0,1]]
    ddd=numpy.zeros((len(yyy),len(xxx)),float)
    ddd2=numpy.zeros((len(yyy),len(xxx)),float)
    # set the type of average we are going to use
    if perArea==0:
        def compAvg(gauss,coord,cellCoord): return float(gauss.avg(coord))
    elif perArea==1:
        def compAvg(gauss,coord,cellCoord): return gauss.avgPerUnitArea(coord)
    elif perArea==2:
        def compAvg(gauss,coord,cellCoord):
            s=gauss.cellSum(cellCoord);
            return (s/gauss.cellArea) if s>0 else float('nan')
    elif perArea==3:
        def compAvg(gauss,coord,cellCoord):
            s=gauss.cellSum(cellCoord);
            return s if s>0 else float('nan')
    else: raise RuntimeError('Invalid value of *perArea*, must be one of 0,1,2,3.')
    #
    for cx in range(0,div[0]):
        for cy in range(0,div[1]):
            ddd[cy,cx]=compAvg(ga,(xxx[cx],yyy[cy]),(cx,cy))
            if nDim>1: ddd2[cy,cx]=compAvg(ga2,(xxx[cx],yyy[cy]),(cx,cy))
    if nDim==1: return {'type':'smoothScalar','x':xxx,'y':yyy,'val':ddd,'bbox':(llo,hhi),'perArea':perArea,'grid':ga}
    else: return {'type':'smoothVector','x':xxx,'y':yyy,'valX':ddd,'valY':ddd2,'bbox':(llo,hhi),'grid':ga,'grid2':ga2}
def plot(data,axes=None,alpha=.5,clabel=True,cbar=False,aspect='equal',**kw):
    """Given output from post2d.data, plot the scalar as discrete or smooth plot.

    For raw discrete data, plot filled circles with radii of particles, colored by the scalar value.

    For smooth discrete data, plot image with optional contours and contour labels.

    For vector data (raw or smooth), plot quiver (vector field), with arrows colored by the magnitude.

    :param axes: matplotlib.axes\ instance where the figure will be plotted; if None, will be created from scratch.
    :param data: value returned by :yref:`yade.post2d.data`
    :param bool clabel: show contour labels (smooth mode only), or annotate cells with numbers inside (with perArea==2)
    :param bool cbar: show colorbar (equivalent to calling pylab.colorbar(mappable) on the returned mappable)
    :return: tuple of ``(axes,mappable)``; mappable can be used in further calls to pylab.colorbar.
    """
    import pylab,math
    if not axes: axes=pylab.gca()
    if data['type']=='rawScalar':
        from matplotlib.patches import Circle
        import matplotlib.collections,numpy
        patches=[]
        for x,y,d,r in zip(data['x'],data['y'],data['val'],data['radii']):
            patches.append(Circle(xy=(x,y),radius=r))
        coll=matplotlib.collections.PatchCollection(patches,linewidths=0.,**kw)
        coll.set_array(numpy.array(data['val']))
        bb=coll.get_datalim(coll.get_transform())
        axes.add_collection(coll)
        axes.set_xlim(bb.xmin,bb.xmax); axes.set_ylim(bb.ymin,bb.ymax)
        if cbar: axes.get_figure().colorbar(coll)
        axes.grid(True); axes.set_aspect(aspect)
        return axes,coll
    elif data['type']=='smoothScalar':
        loHi=data['bbox']
        if data['perArea'] in (0,1):
            img=axes.imshow(data['val'],extent=(loHi[0][0],loHi[1][0],loHi[0][1],loHi[1][1]),origin='lower',aspect=aspect,**kw)
            ct=axes.contour(data['x'],data['y'],data['val'],colors='k',origin='lower',extend='both')
            if clabel: axes.clabel(ct,inline=1,fontsize=10)
        else:
            img=axes.imshow(data['val'],extent=(loHi[0][0],loHi[1][0],loHi[0][1],loHi[1][1]),origin='lower',aspect=aspect,interpolation='nearest',**kw)
            xStep=(data['x'][1]-data['x'][0]) if len(data['x'])>1 else 0
            for y,valLine in zip(data['y'],data['val']):
                for x,val in zip(data['x'],valLine): axes.text(x-.4*xStep,y,('-' if math.isnan(val) else '%5g'%val),size=4)
        axes.update_datalim(loHi)
        axes.set_xlim(loHi[0][0],loHi[1][0]); axes.set_ylim(loHi[0][1],loHi[1][1])
        if cbar: axes.get_figure().colorbar(img)
        axes.grid(True if data['perArea'] in (0,1) else False); axes.set_aspect(aspect)
        return axes,img
    elif data['type'] in ('rawVector','smoothVector'):
        import numpy
        loHi=data['bbox']
        valX,valY=numpy.array(data['valX']),numpy.array(data['valY']) # rawVector data are plain python lists
        scalars=numpy.sqrt(valX**2+valY**2)
        # numpy.sqrt computes element-wise sqrt
        quiv=axes.quiver(data['x'],data['y'],data['valX'],data['valY'],scalars,**kw)
        #axes.update_datalim(loHi)
        axes.set_xlim(loHi[0][0],loHi[1][0]); axes.set_ylim(loHi[0][1],loHi[1][1])
        # Fix: the colorbar must be attached to the quiver mappable; ``coll``
        # only exists in the rawScalar branch and raised NameError here.
        if cbar: axes.get_figure().colorbar(quiv)
        axes.grid(True); axes.set_aspect(aspect)
        return axes,quiv
| gpl-2.0 |
spallavolu/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)

from time import time
from scipy import sparse
from scipy import linalg

from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso


###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")

# Dense design matrix and its sparse (COO) copy of the exact same values.
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)

alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))

# Near-zero distance demonstrates both code paths learn the same model.
print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))

###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")

# Sparsify the data by zeroing small entries, then convert to CSC,
# the format the coordinate-descent solver handles natively.
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))

alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)

t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
# Compare against fitting the densified version of the same matrix.
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))

print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
anaruse/chainer | chainer/training/extensions/plot_report.py | 4 | 6421 | import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
    """Emit a warning when matplotlib could not be imported."""
    if _available:
        return
    warnings.warn('matplotlib is not installed on your environment, '
                  'so nothing will be plotted at this time. '
                  'Please install matplotlib to plot figures.\n\n'
                  ' $ pip install matplotlib\n')
class PlotReport(extension.Extension):

    """Trainer extension to output plots.

    This extension accumulates the observations of the trainer to
    :class:`~chainer.DictSummary` at a regular interval specified by a supplied
    trigger, and plot a graph with using them.

    There are two triggers to handle this extension. One is the trigger to
    invoke this extension, which is used to handle the timing of accumulating
    the results. It is set to ``1, 'iteration'`` by default. The other is the
    trigger to determine when to emit the result. When this trigger returns
    True, this extension appends the summary of accumulated values to the list
    of past summaries, and writes the list to the log file. Then, this
    extension makes a new fresh summary object which is used until the next
    time that the trigger fires.

    It also adds ``'epoch'`` and ``'iteration'`` entries to each result
    dictionary, which are the epoch and iteration counts at the output.

    .. warning::

        If your environment needs to specify a backend of matplotlib
        explicitly, please call ``matplotlib.use`` before calling
        ``trainer.run``. For example:

        .. code-block:: python

            import matplotlib
            matplotlib.use('Agg')

            trainer.extend(
                extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                      'epoch', file_name='loss.png'))
            trainer.run()

        Then, once one of instances of this extension is called,
        ``matplotlib.use`` will have no effect.

    For the details, please see here:
    https://matplotlib.org/faq/usage_faq.html#what-is-a-backend

    Args:
        y_keys (iterable of strs): Keys of values regarded as y. If this is
            None, nothing is output to the graph.
        x_key (str): Keys of values regarded as x. The default value is
            'iteration'.
        trigger: Trigger that decides when to aggregate the result and output
            the values. This is distinct from the trigger of this extension
            itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
            'iteration'``, it is passed to :class:`IntervalTrigger`.
        postprocess: Callback to postprocess the result dictionaries. Figure
            object, Axes object, and all plot data are passed to this callback
            in this order. This callback can modify the figure.
        file_name (str): Name of the figure file under the output directory.
            It can be a format string.
        marker (str): The marker used to plot the graph. Default is ``'x'``. If
            ``None`` is given, it draws with no markers.
        grid (bool): Set the axis grid on if True. Default is True.

    """

    def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
                 postprocess=None, file_name='plot.png', marker='x',
                 grid=True):
        # Warn early if matplotlib is missing; plotting is skipped later.
        _check_available()

        self._x_key = x_key
        if isinstance(y_keys, str):
            y_keys = (y_keys,)

        self._y_keys = y_keys
        self._trigger = trigger_module.get_trigger(trigger)
        self._file_name = file_name
        self._marker = marker
        self._grid = grid
        self._postprocess = postprocess
        self._init_summary()
        # Accumulated (x, y) pairs per y-key, kept across trigger firings.
        self._data = {k: [] for k in y_keys}

    @staticmethod
    def available():
        """Return whether matplotlib is importable (warns when it is not)."""
        _check_available()
        return _available

    def __call__(self, trainer):
        if _available:
            # Dynamically import pyplot to call matplotlib.use()
            # after importing chainer.training.extensions
            import matplotlib.pyplot as plt
        else:
            return

        keys = self._y_keys
        observation = trainer.observation
        summary = self._summary

        if keys is None:
            summary.add(observation)
        else:
            summary.add({k: observation[k] for k in keys if k in observation})

        if self._trigger(trainer):
            # Aggregation interval elapsed: fold the summary into _data,
            # redraw the whole figure and start a fresh summary.
            stats = self._summary.compute_mean()
            stats_cpu = {}
            for name, value in six.iteritems(stats):
                stats_cpu[name] = float(value)  # copy to CPU

            updater = trainer.updater
            stats_cpu['epoch'] = updater.epoch
            stats_cpu['iteration'] = updater.iteration
            x = stats_cpu[self._x_key]
            data = self._data

            for k in keys:
                if k in stats_cpu:
                    data[k].append((x, stats_cpu[k]))

            f = plt.figure()
            a = f.add_subplot(111)
            a.set_xlabel(self._x_key)
            if self._grid:
                a.grid()

            for k in keys:
                xy = data[k]
                if len(xy) == 0:
                    continue

                xy = numpy.array(xy)
                a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)

            if a.has_data():
                if self._postprocess is not None:
                    self._postprocess(f, a, summary)
                l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
                # Keep the out-of-axes legend inside the saved image.
                f.savefig(path.join(trainer.out, self._file_name),
                          bbox_extra_artists=(l,), bbox_inches='tight')

            plt.close()
            self._init_summary()

    def serialize(self, serializer):
        """Save/restore the accumulated plot data as a JSON string."""
        if isinstance(serializer, serializer_module.Serializer):
            serializer('_plot_{}'.format(self._file_name),
                       json.dumps(self._data))

        else:
            # NOTE(review): json round-trip turns the (x, y) tuples into
            # lists; numpy.array in __call__ accepts either form.
            self._data = json.loads(
                serializer('_plot_{}'.format(self._file_name), ''))

    def _init_summary(self):
        # Fresh accumulator for the next aggregation interval.
        self._summary = reporter.DictSummary()
| mit |
sguthrie/predicting-depression | scripts/find_subjects_behavior_data.py | 1 | 4298 | """
"""
import csv
import re
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
def check_valid_value(key, val):
    """Return True when *val* is an acceptable entry for TSV column *key*.

    The 'participant_id' column must match ``sub-`` followed by six digits;
    every other column must hold 'n/a' or a parseable number.
    """
    if key == "participant_id":
        return re.match(r'sub-[0-9]{6}', val) is not None
    if val == "n/a":
        return True
    try:
        float(val)
    except ValueError:
        return False
    return True
def add_to_subjects(subjects, subject_data, behavior_name):
    """Record one subject's value for *behavior_name* in *subjects* (in place).

    The value is converted to float; the subject's sub-dict is created on
    first use.
    """
    participant = subject_data["participant_id"]
    measurement = float(subject_data[behavior_name])
    subjects.setdefault(participant, {})[behavior_name] = measurement
def find_subjects_with_reported_behavior(subjects, behavior_name, filepath):
    """Scan the TSV at *filepath* and record every subject whose
    *behavior_name* column is reported (not 'n/a') into *subjects*.

    Every row is validated column-by-column; the dict is returned for
    chaining.
    """
    assert type(subjects) == dict
    assert type(behavior_name) == str
    with open(filepath, 'r') as handle:
        reader = csv.DictReader(handle, delimiter='\t')
        for row in reader:
            assert behavior_name in row, \
                "Unable to find %s in subject %s for file %s" % (behavior_name,
                    row["participant_id"], filepath)
            for column in row:
                assert check_valid_value(column, row[column]), \
                    "%s is an unexpected value for subject %s for key %s (%s)" % (row[column],
                        row["participant_id"], column, filepath)
            if row[behavior_name] != "n/a":
                add_to_subjects(subjects, row, behavior_name)
    return subjects
def get_data(behavior_paths, behavior_keys):
    """Load every (path, key) pair and split subjects into all vs. complete.

    Returns ``(subjects, complete_subjects, raw_data, complete_raw_data)``
    where the 'complete' variants only contain subjects present in every
    supplied file.
    """
    assert len(behavior_paths) == len(behavior_keys), "Behavior files must be matched \
        one-to-one with behavior keys. Number of behavior files: %i. \
        Number of behavior keys: %i" % (len(behavior_paths), len(behavior_keys))
    subjects = {}
    for path, behavior_key in zip(behavior_paths, behavior_keys):
        subjects = find_subjects_with_reported_behavior(subjects, behavior_key, path)
    complete_subjects = {}
    raw_data = {key: [] for key in behavior_keys}
    complete_raw_data = {key: [] for key in behavior_keys}
    complete_count = 0
    for subject_name, record in subjects.items():
        for key in behavior_keys:
            if key in record:
                raw_data[key].append(record[key])
        if len(record) == len(behavior_paths):
            # Subject appears in every file: also collect into 'complete'.
            complete_count += 1
            complete_subjects[subject_name] = record
            for key in behavior_keys:
                complete_raw_data[key].append(record[key])
    print("%i subjects have data in all given files" % complete_count)
    return subjects, complete_subjects, raw_data, complete_raw_data
def draw_figure(behavior_keys, raw_data, complete_raw_data, autoscale=True):
    """Draw per-key boxplots: all data (left column) vs. complete data (right).

    Mutates *raw_data*/*complete_raw_data* in place, replacing each list with
    a numpy array (same as the original implementation).
    """
    n_rows = len(behavior_keys)
    if autoscale:
        plt.figure(figsize=(6, n_rows * 4))
    plt.subplot(n_rows, 2, 1)
    plt.title("All data")
    plt.subplot(n_rows, 2, 2)
    plt.title("Complete data")
    for row, key in enumerate(behavior_keys):
        raw_data[key] = (np.array(raw_data[key]))
        complete_raw_data[key] = (np.array(complete_raw_data[key]))
        plt.subplot(n_rows, 2, 2 * row + 1)
        plt.boxplot(raw_data[key], labels=[key], showmeans=True, meanline=True)
        plt.subplot(n_rows, 2, 2 * row + 2)
        plt.boxplot(complete_raw_data[key], labels=[key], showmeans=True, meanline=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Each -bf file is paired positionally with a -bk key (append actions).
    parser.add_argument("-bf", "--behavior-files", help="TSV file paths", action='append')
    parser.add_argument("-bk", "--behavior-keys", help="keys to use for each TSV file", action="append")
    args = parser.parse_args()
    subjects, complete_subjects, raw_data, complete_raw_data = get_data(args.behavior_files, args.behavior_keys)
    draw_figure(args.behavior_keys, raw_data, complete_raw_data, autoscale=False)
    plt.show()
| gpl-3.0 |
florian-f/sklearn | sklearn/cluster/setup.py | 10 | 1202 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for sklearn.cluster extensions."""
    from numpy.distutils.misc_util import Configuration

    cblas_libs, blas_info = get_blas_info()

    libraries = []
    if os.name == 'posix':
        # libm must be linked explicitly on POSIX systems.
        cblas_libs.append('m')
        libraries.append('m')

    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)

    # _k_means links against CBLAS; fall back headers live in ../src/cblas.
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone (python setup.py build_ext).
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    header_line = F.readline()
    if PY2:
        # Numpy recarray wants Python 2 str but not unicode
        names = header_line.strip().split(',')
    else:
        # Numpy recarray wants Python 3 str but not bytes...
        names = header_line.decode('ascii').strip().split(',')
    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    rec.dtype.names = names
    return rec
def construct_grids(batch):
    """Construct the map grid from the batch object

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    cell = batch.grid_size
    # Corner-cell coordinates, offset by one cell from the lower-left corner.
    xmin = batch.x_left_lower_corner + cell
    ymin = batch.y_left_lower_corner + cell
    # One coordinate per grid column/row.
    xgrid = np.arange(xmin, xmin + batch.Nx * cell, cell)
    ygrid = np.arange(ymin, ymin + batch.Ny * cell, cell)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    --------
    The data is returned as a Bunch object with the following attributes:

    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.
    train : record array, shape = (1623,)
        The training points for the data. Each point has three fields:
        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees
    test : record array, shape = (619,)
        The test points for the data. Same format as the training data.
    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid
    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees
    grid_size : float
        The spacing between points of the grid, in degrees

    Notes
    ------
    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).

    The two species are:
    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
      the Brown-throated Sloth.
    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, Peru, and Venezuela.

    References
    ----------
    * `"Maximum entropy modeling of species geographic distributions"
      <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Notes
    -----
    * See examples/applications/plot_species_distribution_modeling.py
      for an example of using this dataset with scikit-learn
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files. These should not be changed
    # unless the data model changes. They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16

    # Cache miss: download both archives, parse them and pickle the Bunch.
    # NOTE(review): `download_if_missing` is documented above but not checked
    # here — a cache miss always triggers a download; confirm against the
    # upstream implementation.
    if not exists(join(data_home, DATA_ARCHIVE_NAME)):
        print('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                          data_home))
        # The .zip archive is readable by np.load as an npz container;
        # each member file is exposed through X.files.
        X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))

        for f in X.files:
            fhandle = BytesIO(X[f])
            # Member names distinguish the training and test CSVs.
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)

        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                           data_home))
        X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))

        # Parse each coverage grid and stack them into one 3-D array.
        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            print(' - converting', f)
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)

        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        # Persist the parsed dataset so subsequent calls skip the download.
        joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
    else:
        # Cache hit: load the previously pickled Bunch.
        bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))

    return bunch
| bsd-3-clause |
kpolimis/kpolimis.github.io-src | content/downloads/code/utils.py | 1 | 2245 | import os
import sys
import glob
import json
import time
import fiona
import datetime
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import Image
from shapely.prepared import prep
from descartes import PolygonPatch
from shapely.geometry import Polygon
from mpl_toolkits.basemap import Basemap
from pysal.esda.mapclassify import Natural_Breaks
from matplotlib.collections import PatchCollection
from shapely.geometry import Point, Polygon, MultiPoint, MultiPolygon
warnings.filterwarnings('ignore')
def custom_colorbar(cmap, ncolors, labels, **kwargs):
    """Create a custom, discretized colorbar with correctly formatted/aligned labels.

    cmap: the matplotlib colormap object you plan on using for your graph
    ncolors: (int) the number of discrete colors available
    labels: the list of labels for the colorbar. Should be the same length as ncolors.

    Extra keyword arguments are forwarded to ``plt.colorbar``.
    Returns the created colorbar instance.
    """
    from matplotlib.colors import BoundaryNorm
    from matplotlib.cm import ScalarMappable

    norm = BoundaryNorm(range(0, ncolors), cmap.N)
    mappable = ScalarMappable(cmap=cmap, norm=norm)
    mappable.set_array([])
    mappable.set_clim(-0.5, ncolors + 0.5)
    colorbar = plt.colorbar(mappable, **kwargs)

    # Center one tick inside each color band, then label the ticks.
    colorbar.set_ticks(np.linspace(0, ncolors, ncolors + 1) + 0.5)
    # Fix: the original called set_ticklabels(range(0, ncolors)) here and then
    # immediately overwrote it with the real labels — the redundant call is
    # removed.
    colorbar.set_ticklabels(labels)
    return colorbar
def num_of_contained_points(apolygon, city_points):
    """Count how many of *city_points* fall inside *apolygon*."""
    # Prepare the polygon once so the repeated containment tests are fast.
    prepared = prep(apolygon)
    return sum(1 for point in city_points if prepared.contains(point))
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    """Great-circle distance between two lat/long points on a unit sphere.

    Based on http://www.johndcook.com/python_longitude_latitude.html
    Multiply the returned arc by the sphere's radius (in your favorite
    units) to get a length.
    """
    deg2rad = np.pi / 180.0

    # Convert to spherical coordinates (radians): phi is the colatitude
    # (90 - latitude), theta is the longitude.
    phi1 = (90.0 - lat1) * deg2rad
    phi2 = (90.0 - lat2) * deg2rad
    theta1 = long1 * deg2rad
    theta2 = long2 * deg2rad

    # Spherical law of cosines gives the cosine of the central angle.
    cos_arc = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2)
               + np.cos(phi1) * np.cos(phi2))
    return np.arccos(cos_arc)
| gpl-3.0 |
mendax-grip/cfdemUtilities | vonKarmanSingh/fftMultiple.py | 2 | 4375 | # This program analyses the X and Y drag coefficient (drag and lift) from the cylinder immersed
# boundary test cases
# It can be compared visually afterward to experimental data
# Currently is not generic and can only load 2 data set, but anyway more makes it an unreadable mess
#
# USAGE : python ./FOLDERWHEREDATA-1-IS ./FOLDERWHEREDATA-2-IS
#
# Author : Bruno Blais
#Python imports
#----------------
import os
import sys
import numpy
import time
import scipy
import matplotlib.pyplot as plt
import re
#----------------
#TODO
# - Make everything in a single loop instead
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
# NOTE(review): this is a Python 2 script (bare `print` statements).
# ------------------------------------------------------------------
# User parameters
# ------------------------------------------------------------------
skip=100          # number of initial samples skipped in the time plots
pdf=1             # save the FFT figure to PDF when non-zero
tminFFT=175.      # transient cut-off time for data set 1
tminFFT2=100.50   # transient cut-off time for data set 2

#Figure size
plt.rcParams['figure.figsize'] = 10, 7
font = {#'family' : 'normal',
        'weight' : 'normal',
        'size' : 14}
plt.rc('font', **font)

#======================
#   MAIN
#======================
tFold= 0  # flag: 1 when a second folder is given on the command line

#Read the logs files
if (len(sys.argv)<1):
    print 'Folder must be specified when running this python script'
    sys.exit("Crashed because folder was not specified")
if (len(sys.argv)>3):
    print 'Too many arguments, only the first two folders will be post-processed'

folder = [sys.argv[1], ' ']
if (len(sys.argv)>2):
    tFold= 1
    folder = [sys.argv[1], sys.argv[2]]

# Load the drag (X) and lift (Y) coefficient histories of data set 1.
tx1, dx1 = numpy.loadtxt(folder[0]+'/dragX', unpack=True)
ty1, dy1 = numpy.loadtxt(folder[0]+'/dragY', unpack=True)
dx1=dx1*2
dy1=dy1*2

# Take absolute value
dx1= numpy.abs(dx1)
# Keep only samples after the transient (t > tminFFT).
index = numpy.where(ty1>tminFFT )

# Manual FFT to get amplitude and frequencies right!
Fs = 1. / (tx1[2]-tx1[1])  # Sampling frequency
df = 1. / (ty1[-1]-tminFFT)  # frequency resolution
N= len(dy1[index])  # Number of points

# First normalise the amplitude with respect to the number of points
spectrum = abs(numpy.fft.fft(dy1[index])) / N
f1 = numpy.arange(0.,Fs/2.-df,df)
print "Number of point for FFT:", N

# Keep positive part of the FFT spectrum
Nf = (N)/2
spectrum1 = 2 * spectrum[0:len(f1)]

# Same processing for the optional second data set.
if (tFold):
    tx2, dx2 = numpy.loadtxt(folder[1]+'/dragX', unpack=True)
    ty2, dy2 = numpy.loadtxt(folder[1]+'/dragY', unpack=True)
    dx2=dx2*2
    dy2=dy2*2
    index2 = numpy.where(ty2>tminFFT2)
    # Take absolute value
    dx2= numpy.abs(dx2)
    # Manual FFT to get amplitude and frequencies right!
    Fs = 1. / (tx2[2]-tx2[1])  # Sampling frequency
    df = 1. / (ty2[-1] -tminFFT2)
    N2= len(dy2[index2])  # Number of points
    # First normalise the amplitude with respect to the number of points
    spectrum = abs(numpy.fft.fft(dy2[index2])) / N2
    f2 = numpy.arange(0.,Fs/2.-df,df)
    # Keep positive part of the FFT spectrum
    Nf2 = (N2)/2
    spectrum2 = 2 * spectrum[0:len(f2)]

# Plotting stage: log-log amplitude spectrum of the lift coefficient.
axfft=plt.figure("FFT C_L")
axfftp = axfft.add_subplot(111)
plt.ylabel(' Amplitude ')
plt.xlabel('Strouhal Number ($St$)')
#plt.title('Frequency spectrum of $C_L$ ')
plt.yscale('log')
plt.xscale('log')
if (tFold ==0):
    plt.plot(f1,spectrum1,linewidth=2.0)
if (tFold ==1):
    plt.plot(f1,spectrum1,'--k',label='Moving cylinder',linewidth=2)
    plt.plot(f2,spectrum2,'-k',linewidth=1.5, label='Static cylinder')
#axfftp.grid(b=True, which='minor', color='k', linestyle='--')
axfftp.grid(b=True, which='major', color='k', linestyle='--')
plt.legend(loc=1)
if (pdf): plt.savefig("./fftOnCylinder.pdf")

# Time histories of the drag and lift coefficients.
ax = plt.figure("Drag coefficient") #Create window
axp=ax.add_subplot(111)
plt.ylabel('$C_D$, $C_L$ ')
plt.xlabel('time [s]')
#plt.title('Drag coefficients with time for 2D Kelvin-Helmholtz ')
if (tFold ==0):
    plt.plot(tx1[skip:],dx1[skip:],'b-', label='$C_D$',linewidth=2.0)
    plt.plot(ty1[skip:],-dy1[skip:],'g-', label='$C_L$',linewidth=2.0)
if (tFold ==1):
    plt.plot(tx1[skip:],dx1[skip:],'-', label=('$C_D$-'+sys.argv[1]),linewidth=2.0)
    plt.plot(ty1[skip:],-dy1[skip:],'-', label=('$C_L$-'+sys.argv[1]),linewidth=2.0)
    plt.plot(tx2[skip:],dx2[skip:],'-', label=('$C_D$-'+sys.argv[2]),linewidth=2.0)
    plt.plot(ty2[skip:],-dy2[skip:],'-', label=('$C_L$-'+sys.argv[2]),linewidth=2.0)
plt.legend(loc=3)

# Summary statistics for data set 1 (post-transient window only).
print "Averaged CD:\t", numpy.average(dx1[index])
print "Amplitude CD:\t", (numpy.max(dx1[index])-numpy.min(dx1[index]))/2
print "Amplitude CL:\t", (numpy.max(dy1[index])-numpy.min(dy1[index]))/2
print "Average CL:\t", numpy.average(dy1[index])
axp.grid(b=True, which='major', color='k', linestyle='--')
plt.show()
| lgpl-3.0 |
QISKit/qiskit-sdk-py | test/python/visualization/test_circuit_matplotlib_drawer.py | 1 | 6355 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring
import unittest
import os
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import visualization
from .visualization import QiskitVisualizationTestCase
if visualization.HAS_MATPLOTLIB:
from matplotlib import pyplot as plt
def _path_to_reference(filename):
    """Return the absolute path of *filename* inside the local 'references' dir."""
    reference_dir = os.path.join(_this_directory(), 'references')
    return os.path.join(reference_dir, filename)
def _this_directory():
    """Return the absolute path of the directory containing this module."""
    return os.path.abspath(os.path.dirname(__file__))
class TestMatplotlibDrawer(QiskitVisualizationTestCase):
    """Image-comparison tests for the matplotlib ('mpl') circuit drawer."""

    def _expected_empty(self):
        """Build the reference figure an empty circuit is expected to render to."""
        # Generate blank
        expected = plt.figure()
        expected.patch.set_facecolor(color='#ffffff')
        ax = expected.add_subplot(111)
        ax.axis('off')
        ax.set_aspect('equal')
        ax.tick_params(labelbottom=False, labeltop=False,
                       labelleft=False, labelright=False)
        # Fixed canvas size matching what the drawer produces for an empty circuit.
        expected.set_size_inches(2.508333333333333, 0.2508333333333333)
        return expected

    @unittest.skipIf(not visualization.HAS_MATPLOTLIB, 'matplotlib not available.')
    def test_empty_circuit(self):
        """Drawing an empty circuit must produce a blank canvas."""
        qc = QuantumCircuit()
        filename = self._get_resource_path('current_pulse_matplotlib_ref.png')
        visualization.circuit_drawer(qc, output='mpl', filename=filename)
        self.addCleanup(os.remove, filename)
        # The reference image is generated on the fly rather than stored.
        expected_filename = self._get_resource_path('expected_current_pulse_matplotlib_ref.png')
        expected = self._expected_empty()
        expected.savefig(expected_filename)
        self.addCleanup(os.remove, expected_filename)
        self.assertImagesAreEqual(filename, expected_filename)

    @unittest.skipIf(not visualization.HAS_MATPLOTLIB, 'matplotlib not available.')
    def test_plot_barriers(self):
        """Test to see that plotting barriers works.
        If it is set to False, no blank columns are introduced"""
        # generate a circuit with barriers and other barrier like instructions in
        q = QuantumRegister(2, 'q')
        c = ClassicalRegister(2, 'c')
        qc = QuantumCircuit(q, c)
        # check for barriers
        qc.h(q[0])
        qc.barrier()
        # check for other barrier like commands
        qc.h(q[1])
        # this import appears to be unused, but is actually needed to get snapshot instruction
        import qiskit.extensions.simulator  # pylint: disable=unused-import
        qc.snapshot('1')
        # check the barriers plot properly when plot_barriers= True
        filename = self._get_resource_path('visualization/references/current_matplotlib_ref.png')
        visualization.circuit_drawer(qc, output='mpl', plot_barriers=True, filename=filename)
        self.addCleanup(os.remove, filename)
        ref_filename = self._get_resource_path(
            'visualization/references/matplotlib_barriers_ref.png')
        self.assertImagesAreEqual(filename, ref_filename)
        # check that the barrier aren't plotted when plot_barriers = False
        filename = self._get_resource_path('current_matplotlib_ref.png')
        visualization.circuit_drawer(qc, output='mpl', plot_barriers=False, filename=filename)
        self.addCleanup(os.remove, filename)
        # generate the same circuit but without the barrier commands as this is what the
        # circuit should look like when displayed with plot barriers false
        q1 = QuantumRegister(2, 'q')
        c1 = ClassicalRegister(2, 'c')
        qc1 = QuantumCircuit(q1, c1)
        qc1.h(q1[0])
        qc1.h(q1[1])
        no_barriers_filename = self._get_resource_path('current_no_barriers_matplotlib_ref.png')
        visualization.circuit_drawer(qc1, output='mpl', justify='None',
                                     filename=no_barriers_filename)
        self.addCleanup(os.remove, no_barriers_filename)
        self.assertImagesAreEqual(filename, no_barriers_filename)

    @unittest.skipIf(not visualization.HAS_MATPLOTLIB,
                     'matplotlib not available.')
    def test_long_name(self):
        """Test to see that long register names can be seen completely
        As reported in #2605
        """
        # add a register with a very long name
        qr = QuantumRegister(4, 'veryLongQuantumRegisterName')
        # add another to make sure adjustments are made based on longest
        qrr = QuantumRegister(1, 'q0')
        circuit = QuantumCircuit(qr, qrr)
        # check gates are shifted over accordingly
        circuit.h(qr)
        circuit.h(qr)
        circuit.h(qr)
        # Reference images are OS-specific (os.name) because font metrics differ.
        filename = self._get_resource_path('current_%s_long_name_matplotlib.png' % os.name)
        visualization.circuit_drawer(circuit, output='mpl', filename=filename)
        self.addCleanup(os.remove, filename)
        ref_filename = self._get_resource_path(
            'visualization/references/%s_long_name_matplotlib.png' % os.name)
        self.assertImagesAreEqual(ref_filename, filename)

    @unittest.skipIf(not visualization.HAS_MATPLOTLIB,
                     'matplotlib not available.')
    def test_conditional(self):
        """Test that circuits with conditionals draw correctly
        """
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        circuit = QuantumCircuit(qr, cr)
        # check gates are shifted over accordingly
        circuit.h(qr)
        circuit.measure(qr, cr)
        # gate conditioned on the classical register holding the value 2
        circuit.h(qr[0]).c_if(cr, 2)
        conditional_filename = self._get_resource_path('current_conditional_matplotlib_ref.png')
        visualization.circuit_drawer(circuit, output='mpl',
                                     filename=conditional_filename)
        self.addCleanup(os.remove, conditional_filename)
        ref_filename = self._get_resource_path(
            'visualization/references/matplotlib_conditional_ref.png')
        self.assertImagesAreEqual(ref_filename, conditional_filename)
| apache-2.0 |
nkmk/python-snippets | notebook/pandas_to_csv.py | 1 | 1404 | import pandas as pd
# Demonstrates pandas DataFrame.to_csv options on a small sample file.
# Load the sample CSV, using its first column ('name') as the index.
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
#          age state  point
# name
# Alice     24    NY     64
# Bob       42    CA     92
# Charlie   18    CA     70
# Dave      68    TX     70
# Ellen     24    CA     88
# Frank     30    NY     57

# Default export: header and index included.
df.to_csv('data/dst/to_csv_out.csv')

# Export only a subset of columns.
df.to_csv('data/dst/to_csv_out_columns.csv', columns=['age'])

# Export raw values only: no header row, no index column.
df.to_csv('data/dst/to_csv_out_header_index.csv', header=False, index=False)

# Tab-separated output (TSV) via the `sep` parameter.
df.to_csv('data/dst/to_csv_out.tsv', sep='\t')

# Append mode: write once, then append the same rows without repeating the header.
df.to_csv('data/dst/to_csv_out_a.csv')
df.to_csv('data/dst/to_csv_out_a.csv', mode='a', header=False)

# Round-trip: write, read back, add a column, write again.
df.to_csv('data/dst/to_csv_out_a_new_column.csv')
df = pd.read_csv('data/dst/to_csv_out_a_new_column.csv', index_col=0)
print(df)
#          age state  point
# name
# Alice     24    NY     64
# Bob       42    CA     92
# Charlie   18    CA     70
# Dave      68    TX     70
# Ellen     24    CA     88
# Frank     30    NY     57

df['new_col'] = 'new data'
print(df)
#          age state  point   new_col
# name
# Alice     24    NY     64  new data
# Bob       42    CA     92  new data
# Charlie   18    CA     70  new data
# Dave      68    TX     70  new data
# Ellen     24    CA     88  new data
# Frank     30    NY     57  new data

df.to_csv('data/dst/to_csv_out_a_new_column.csv')
khyrulimam/pemrograman-linear-optimasi-gizi-anak-kos | nutrisi.py | 1 | 1544 | import numpy as np
# NOTE(review): Python 2 script (bare `print` statements); `solver` is a
# project-local module wrapping PuLP.
# Linear program: maximize protein + magnesium intake from two foods
# (bayam = spinach, tempe = tempeh) under a daily budget constraint.
import pulp
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.patches import PathPatch
from matplotlib.path import Path
import solver

bayam = 'bayam'
tempe = 'tempe'
problem_name = 'Optimasi Gizi Anak Kos'

# decision variables: integer servings of each food, restricted to x, y >= 0
x = pulp.LpVariable(bayam, lowBound=0, cat=pulp.LpInteger)  # servings of spinach (non-negative)
y = pulp.LpVariable(tempe, lowBound=0, cat=pulp.LpInteger)  # servings of tempeh (non-negative)

# objective function: total protein plus total magnesium
max_protein = 14 * x + 57 * y
max_magnesium = 248 * x + 243 * y
maximize = max_protein + max_magnesium

# constraints: minimum daily protein/magnesium, maximum daily budget
protein = 14 * x + 57 * y >= 50  # 14x+57y >= 50
magnesium = 248 * x + 243 * y >= 400  # 248x+243y >= 400
harga = 2500 * x + 2000 * y <= 15000  # 2500x+2000y <= 15000 (price budget)

# solve via the local wrapper
solver = solver.Solver(problem_name, pulp.LpMaximize)  # init the solver with maximize solution
solver.tujuan(maximize)  # add the objective function
solver.kendala(protein, magnesium, harga)  # add the constraints
solver.hasil(x, y)  # print result if solved

# report the achieved nutrient totals and spending
max_protein = 14 * x.value() + 57 * y.value()
max_magnesium = 248 * x.value() + 243 * y.value()
print "Max protein yang didapat sehari adalah {}g dari total minimal yang dibutuhkan (50g)".format(max_protein)
print "Max magnesium yang didapat sehari adalah {}mg dari total minimal yang dibutuhkan (400mg)".format(max_magnesium)
print "Dengan pengeluaran {:.0f}/hari dari jatah belanja 15000/perhari".format(2500 * x.value() + 2000 * y.value())
SanPen/GridCal | src/GridCal/Gui/GuiFunctions.py | 1 | 37446 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import numba as nb
import pandas as pd
from PySide2.QtWidgets import *
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtGui import *
from warnings import warn
from enum import EnumMeta
from collections import defaultdict
from matplotlib import pyplot as plt
from GridCal.Engine.Devices import DeviceType, BranchTemplate, BranchType, Bus, Area, Substation, Zone, Country
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.basic_structures import CDF
class TreeDelegate(QItemDelegate):
    """
    A delegate that places a fully functioning QTreeView in every
    cell of the column to which it's applied.
    (The original docstring said "QComboBox"; the editor created below is a tree.)
    """
    commitData = QtCore.Signal(object)

    def __init__(self, parent, data=None):
        """
        Constructor
        :param parent: QTableView parent object
        :param data: dictionary of lists {group: [entries]}; a fresh empty
                     defaultdict is used when omitted
        """
        QItemDelegate.__init__(self, parent)

        # Dictionary of lists used to populate the tree editor.
        # Fix: the previous signature used the mutable default
        # `data=defaultdict()`, which is evaluated once and shared by every
        # instance constructed without an explicit argument.
        self.data = data if data is not None else defaultdict()

    @QtCore.Slot()
    def double_click(self):
        """Commit the sending editor's data when one of its items is double-clicked."""
        # (Leftover debug print removed.)
        self.commitData.emit(self.sender())

    def createEditor(self, parent, option, index):
        """
        Build a QTreeView editor populated with the groups and children of self.data.
        :param parent: parent widget
        :param option: style options (unused)
        :param index: model index being edited (unused)
        :return: QTreeView widget
        """
        tree = QTreeView(parent)

        model = QStandardItemModel()
        model.setHorizontalHeaderLabels(['Template'])

        for key in self.data.keys():
            # add parent node
            parent1 = QStandardItem(str(key))
            # add children to parent
            for elm in self.data[key]:
                child1 = QStandardItem(str(elm))
                parent1.appendRow([child1])
            model.appendRow(parent1)

        tree.setModel(model)
        tree.doubleClicked.connect(self.double_click)
        return tree

    def setEditorData(self, editor, index):
        """
        Load the model value into the editor.
        NOTE: not implemented; the previous version only printed its
        arguments to stdout (debug output removed).
        """
        pass

    def setModelData(self, editor, model, index):
        """
        Write the editor's value back into the model.
        NOTE: not implemented; the previous version only printed its
        arguments to stdout (debug output removed).
        """
        # model.setData(index, self.object_names[editor.currentIndex()])
        pass
class ComboDelegate(QItemDelegate):
    commitData = QtCore.Signal(object)
    """
    A delegate that places a fully functioning QComboBox in every
    cell of the column to which it's applied
    """

    def __init__(self, parent, objects, object_names):
        """
        Constructor
        :param parent: QTableView parent object
        :param objects: List of objects to set. i.e. [True, False]
        :param object_names: List of Object names to display. i.e. ['True', 'False']
        """
        QItemDelegate.__init__(self, parent)

        # values written into the model when a label is picked, i.e. [True, False]
        self.objects = objects

        # human-readable labels shown in the combo box, i.e. ['True', 'False']
        self.object_names = object_names

    @QtCore.Slot()
    def currentIndexChanged(self):
        # propagate the selection change so the view commits it immediately
        self.commitData.emit(self.sender())

    def createEditor(self, parent, option, index):
        """Create the combo-box editor and wire its change signal."""
        editor = QComboBox(parent)
        editor.addItems(self.object_names)
        editor.currentIndexChanged.connect(self.currentIndexChanged)
        return editor

    def setEditorData(self, editor, index):
        """Select the combo entry matching the model's displayed value."""
        # block signals so the programmatic selection is not committed back
        editor.blockSignals(True)
        displayed = index.model().data(index, role=QtCore.Qt.DisplayRole)
        editor.setCurrentIndex(self.object_names.index(displayed))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        """Store the object associated with the selected label."""
        model.setData(index, self.objects[editor.currentIndex()])
class TextDelegate(QItemDelegate):
    commitData = QtCore.Signal(object)
    """
    A delegate that places a fully functioning QLineEdit in every
    cell of the column to which it's applied
    """

    def __init__(self, parent):
        """
        Constructor
        :param parent: QTableView parent object
        """
        QItemDelegate.__init__(self, parent)

    @QtCore.Slot()
    def returnPressed(self):
        # commit the edit as soon as the user presses Return
        self.commitData.emit(self.sender())

    def createEditor(self, parent, option, index):
        """Create a single-line text editor that commits on Return."""
        line_edit = QLineEdit(parent)
        line_edit.returnPressed.connect(self.returnPressed)
        return line_edit

    def setEditorData(self, editor, index):
        """Copy the model's displayed text into the editor."""
        # block signals so the programmatic update is not committed back
        editor.blockSignals(True)
        editor.setText(index.model().data(index, role=QtCore.Qt.DisplayRole))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        """Write the edited text back into the model."""
        model.setData(index, editor.text())
class FloatDelegate(QItemDelegate):
    commitData = QtCore.Signal(object)
    """
    A delegate that places a fully functioning QDoubleSpinBox in every
    cell of the column to which it's applied
    """

    def __init__(self, parent, min_=-9999, max_=9999):
        """
        Constructor
        :param parent: QTableView parent object
        :param min_: lower bound for the spin box
        :param max_: upper bound for the spin box
        """
        QItemDelegate.__init__(self, parent)
        self.min = min_
        self.max = max_

    @QtCore.Slot()
    def returnPressed(self):
        # commit once editing is finished
        self.commitData.emit(self.sender())

    def createEditor(self, parent, option, index):
        """Create an 8-decimal spin box bounded by [self.min, self.max]."""
        spin = QDoubleSpinBox(parent)
        spin.setMinimum(self.min)
        spin.setMaximum(self.max)
        spin.setDecimals(8)
        spin.editingFinished.connect(self.returnPressed)
        return spin

    def setEditorData(self, editor, index):
        """Load the model's displayed value into the spin box."""
        # block signals so the programmatic update is not committed back
        editor.blockSignals(True)
        editor.setValue(float(index.model().data(index, role=QtCore.Qt.DisplayRole)))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        """Write the spin box value back into the model."""
        model.setData(index, editor.value())
class ComplexDelegate(QItemDelegate):
    commitData = QtCore.Signal(object)
    """
    A delegate that places a fully functioning Complex Editor in every
    cell of the column to which it's applied
    """
    def __init__(self, parent):
        """
        Constructor
        :param parent: QTableView parent object
        """
        QItemDelegate.__init__(self, parent)

    @QtCore.Slot()
    def returnPressed(self):
        """
        Emit commitData with the sending editor so the view stores its value.
        :return:
        """
        self.commitData.emit(self.sender())

    def createEditor(self, parent, option, index):
        """
        Build the editor: a frame holding two 8-decimal spin boxes laid out
        horizontally — real part first, imaginary part second.
        :param parent: parent widget
        :param option: style options (unused)
        :param index: model index (unused)
        :return: QFrame editor widget
        """
        editor = QFrame(parent)
        main_layout = QHBoxLayout(editor)
        main_layout.layout().setContentsMargins(0, 0, 0, 0)

        # real-part spin box
        real = QDoubleSpinBox()
        real.setMaximum(9999)
        real.setMinimum(-9999)
        real.setDecimals(8)

        # imaginary-part spin box
        imag = QDoubleSpinBox()
        imag.setMaximum(9999)
        imag.setMinimum(-9999)
        imag.setDecimals(8)

        main_layout.addWidget(real)
        main_layout.addWidget(imag)
        # main_layout.addWidget(button)
        # button.clicked.connect(self.returnPressed)

        return editor

    def setEditorData(self, editor, index):
        """
        Load the model's complex value into the two spin boxes.
        NOTE(review): children()[1] / children()[2] assume the layout object is
        the frame's first child and the spin boxes follow in creation order —
        fragile if createEditor ever changes; confirm before reordering widgets.
        :param editor:
        :param index:
        :return:
        """
        # block signals so the programmatic update is not committed back
        editor.blockSignals(True)
        val = complex(index.model().data(index, role=QtCore.Qt.DisplayRole))
        editor.children()[1].setValue(val.real)
        editor.children()[2].setValue(val.imag)
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        """
        Assemble a complex number from the two spin boxes and store it in the model.
        :param editor:
        :param model:
        :param index:
        :return:
        """
        val = complex(editor.children()[1].value(), editor.children()[2].value())
        model.setData(index, val)
class PandasModel(QtCore.QAbstractTableModel):
    """
    Class to populate a Qt table view with a pandas data frame
    """
    def __init__(self, data: pd.DataFrame, parent=None, editable=False, editable_min_idx=-1, decimals=6):
        """
        :param data: pandas DataFrame to display
        :param parent: parent QTableView
        :param editable: allow cell editing?
        :param editable_min_idx: only columns with index greater than this are editable
        :param decimals: number of decimals used when formatting numeric values
        """
        QtCore.QAbstractTableModel.__init__(self, parent)
        # raw values, column labels and index values pulled out of the frame
        self.data_c = data.values
        self.cols_c = data.columns
        self.index_c = data.index.values
        self.editable = editable
        self.editable_min_idx = editable_min_idx
        self.r, self.c = self.data_c.shape
        # True when the index holds datetimes (affects headerData formatting)
        self.isDate = False
        if self.r > 0 and self.c > 0:
            if isinstance(self.index_c[0], np.datetime64):
                self.index_c = pd.to_datetime(self.index_c)
                self.isDate = True
        # format spec applied to every numeric cell, e.g. '.6f'
        self.format_string = '.' + str(decimals) + 'f'
        # NOTE(review): this formatter appears unused within this class
        self.formatter = lambda x: "%.2f" % x

    def flags(self, index):
        """Cells are editable only when enabled and beyond editable_min_idx."""
        if self.editable and index.column() > self.editable_min_idx:
            return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        else:
            return QtCore.Qt.ItemIsEnabled

    def rowCount(self, parent=None):
        """
        :param parent: unused (required by the Qt model interface)
        :return: number of rows
        """
        return self.r

    def columnCount(self, parent=None):
        """
        :param parent: unused (required by the Qt model interface)
        :return: number of columns
        """
        return self.c

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """
        Return the formatted display string for a cell.
        :param index: model index
        :param role: Qt item role (only DisplayRole is answered)
        :return: formatted string, or None for other roles / invalid indices
        """
        if index.isValid() and role == QtCore.Qt.DisplayRole:
            val = self.data_c[index.row(), index.column()]
            if isinstance(val, str):
                return val
            elif isinstance(val, complex):
                # zeros collapse to a bare '0' instead of '0.000000+0.000000j'
                if val.real != 0 or val.imag != 0:
                    return val.__format__(self.format_string)
                else:
                    return '0'
            else:
                # same collapsing for real numbers
                if val != 0:
                    return val.__format__(self.format_string)
                else:
                    return '0'
        return None

    def setData(self, index, value, role=QtCore.Qt.DisplayRole):
        """
        Store an edited value directly into the underlying array.
        NOTE(review): Qt's setData contract expects a bool return (True on
        success); returning None is treated as False by views — confirm
        whether any caller depends on the return value.
        :param index: model index
        :param value: new cell value
        :param role: Qt item role (unused)
        :return: None
        """
        self.data_c[index.row(), index.column()] = value
        return None

    def headerData(self, p_int, orientation, role):
        """
        Provide column labels (horizontal) and index labels (vertical).
        Datetime indices are rendered as 'YYYY/MM/DD HH:MM.SS'.
        :param p_int: section number
        :param orientation: Qt.Horizontal or Qt.Vertical
        :param role: Qt item role (only DisplayRole is answered)
        :return: header string, or None
        """
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                return self.cols_c[p_int]
            elif orientation == QtCore.Qt.Vertical:
                if self.index_c is None:
                    return p_int
                else:
                    if self.isDate:
                        return self.index_c[p_int].strftime('%Y/%m/%d %H:%M.%S')
                    else:
                        return str(self.index_c[p_int])
        return None

    def copy_to_column(self, row, col):
        """
        Copies one value to all the column
        @param row: Row of the value
        @param col: Column of the value
        @return: Nothing
        """
        self.data_c[:, col] = self.data_c[row, col]

    def get_data(self, mode=None):
        """
        Args:
            mode: 'real', 'imag', 'abs' — how complex data is projected;
                  any other value (or None) falls back to 'abs'
        Returns: index, columns, data
        """
        n = len(self.cols_c)

        if n > 0:
            # gather values
            if type(self.cols_c) == pd.Index:
                names = self.cols_c.values

                if len(names) > 0:
                    if type(names[0]) == ResultTypes:
                        # unwrap ResultTypes enums into their display names
                        names = [val.name for val in names]

            elif type(self.cols_c) == ResultTypes:
                names = [val.name for val in self.cols_c]
            else:
                names = [val.name for val in self.cols_c]

            if self.data_c.dtype == complex:
                # project complex data according to the requested mode
                if mode == 'real':
                    values = self.data_c.real
                elif mode == 'imag':
                    values = self.data_c.imag
                elif mode == 'abs':
                    values = np.abs(self.data_c)
                else:
                    # default projection for complex data is the magnitude
                    values = np.abs(self.data_c)

            else:
                values = self.data_c

            return self.index_c, names, values
        else:
            # there are no elements
            return list(), list(), list()

    def save_to_excel(self, file_name, mode):
        """
        Args:
            file_name: path of the Excel file to write
            mode: 'real', 'imag', 'abs' (forwarded to get_data)
        Returns: Nothing
        """
        index, columns, data = self.get_data(mode=mode)

        df = pd.DataFrame(data=data, index=index, columns=columns)
        df.to_excel(file_name)

    def copy_to_clipboard(self, mode=None):
        """
        Copy profiles to clipboard
        Args:
            mode: 'real', 'imag', 'abs' (forwarded to get_data)
        """
        n = len(self.cols_c)

        if n > 0:
            index, columns, data = self.get_data(mode=mode)
            data = data.astype(str)

            # header first
            txt = '\t' + '\t'.join(columns) + '\n'

            # data: skip rows summing to zero
            # NOTE(review): at this point `data` is a string array; `.sum()`
            # on it does not produce a numeric total — confirm this filter
            # behaves as intended.
            for t, index_value in enumerate(index):
                if data[t, :].sum() != 0.0:
                    txt += str(index_value) + '\t' + '\t'.join(data[t, :]) + '\n'

            # copy to clipboard
            cb = QApplication.clipboard()
            cb.clear(mode=cb.Clipboard)
            cb.setText(txt, mode=cb.Clipboard)

        else:
            # there are no elements
            pass
class ObjectsModel(QtCore.QAbstractTableModel):
"""
Class to populate a Qt table view with the properties of objects
"""
def __init__(self, objects, editable_headers, parent=None, editable=False,
non_editable_attributes=list(), transposed=False, check_unique=list(),
dictionary_of_lists={}):
"""
:param objects: list of objects associated to the editor
:param editable_headers: Dictionary with the properties and the units and type {attribute: ('unit', type)}
:param parent: Parent object: the QTableView object
:param editable: Is the table editable?
:param non_editable_attributes: List of attributes that are not enabled for editing
:param transposed: Display the table transposed?
:param dictionary_of_lists: dictionary of lists for the Delegates
"""
QtCore.QAbstractTableModel.__init__(self, parent)
self.parent = parent
self.attributes = list(editable_headers.keys())
self.attribute_types = [editable_headers[attr].tpe for attr in self.attributes]
self.units = [editable_headers[attr].units for attr in self.attributes]
self.tips = [editable_headers[attr].definition for attr in self.attributes]
self.objects = objects
self.editable = editable
self.non_editable_attributes = non_editable_attributes
self.check_unique = check_unique
self.r = len(self.objects)
self.c = len(self.attributes)
self.formatter = lambda x: "%.2f" % x
self.transposed = transposed
self.dictionary_of_lists = dictionary_of_lists
self.set_delegates()
def set_delegates(self):
"""
Set the cell editor types depending on the attribute_types array
:return:
"""
if self.transposed:
F = self.parent.setItemDelegateForRow
else:
F = self.parent.setItemDelegateForColumn
for i in range(self.c):
tpe = self.attribute_types[i]
if tpe is bool:
delegate = ComboDelegate(self.parent, [True, False], ['True', 'False'])
F(i, delegate)
elif tpe is BranchTemplate or tpe is str:
delegate = TextDelegate(self.parent)
F(i, delegate)
elif tpe is BranchTemplate:
F(i, None)
elif tpe is float:
delegate = FloatDelegate(self.parent)
F(i, delegate)
elif tpe is complex:
delegate = ComplexDelegate(self.parent)
F(i, delegate)
elif tpe is None:
F(i, None)
if len(self.non_editable_attributes) == 0:
self.non_editable_attributes.append(self.attributes[i])
elif isinstance(tpe, EnumMeta):
objects = list(tpe)
values = [x.value for x in objects]
delegate = ComboDelegate(self.parent, objects, values)
F(i, delegate)
elif tpe in [DeviceType.SubstationDevice, DeviceType.AreaDevice,
DeviceType.ZoneDevice, DeviceType.CountryDevice]:
objects = self.dictionary_of_lists[tpe.value]
values = [x.name for x in objects]
delegate = ComboDelegate(self.parent, objects, values)
F(i, delegate)
else:
F(i, None)
def update(self):
"""
update table
"""
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row)
# whatever code
self.endInsertRows()
def flags(self, index):
"""
Get the display mode
:param index:
:return:
"""
if self.transposed:
attr_idx = index.row()
else:
attr_idx = index.column()
if self.editable and self.attributes[attr_idx] not in self.non_editable_attributes:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.ItemIsEnabled
def rowCount(self, parent=None):
    """
    Number of rows shown by the view: attributes when transposed,
    objects otherwise.
    :param parent: unused (required by the Qt API)
    :return: int
    """
    return self.c if self.transposed else self.r
def columnCount(self, parent=None):
    """
    Number of columns shown by the view: objects when transposed,
    attributes otherwise.
    :param parent: unused (required by the Qt API)
    :return: int
    """
    return self.r if self.transposed else self.c
def data_with_type(self, index):
    """
    Fetch the typed value backing a cell.
    :param index: QModelIndex
    :return: the attribute value; a Bus attribute yields its name, a
             BranchType attribute yields the enum instance
    """
    if self.transposed:
        obj_idx, attr_idx = index.column(), index.row()
    else:
        obj_idx, attr_idx = index.row(), index.column()

    obj = self.objects[obj_idx]
    attr = self.attributes[attr_idx]
    tpe = self.attribute_types[attr_idx]

    if tpe is Bus:
        # show the bus name rather than the object repr
        return getattr(obj, attr).name
    if tpe is BranchType:
        return BranchType(getattr(obj, attr))
    return getattr(obj, attr)
def data(self, index, role=None):
    """
    Qt data accessor: return the display string of a cell.
    :param index: QModelIndex
    :param role: Qt role being queried
    :return: str for a valid DisplayRole query, otherwise None
    """
    if not index.isValid() or role != QtCore.Qt.DisplayRole:
        return None
    return str(self.data_with_type(index))
def setData(self, index, value, role=None):
    """
    Set data by simple editor (whatever text)
    :param index: QModelIndex being edited
    :param value: new value coming from the editor
    :param role: Qt role (unused)
    :return: True (Qt convention for "edit accepted")
    """
    if self.transposed:
        obj_idx = index.column()
        attr_idx = index.row()
    else:
        obj_idx = index.row()
        attr_idx = index.column()

    tpe = self.attribute_types[attr_idx]

    # check taken values: refuse the edit when the attribute must be unique
    # and another object already uses this value
    if self.attributes[attr_idx] in self.check_unique:
        taken = self.attr_taken(self.attributes[attr_idx], value)
    else:
        taken = False

    if not taken:
        if self.attributes[attr_idx] not in self.non_editable_attributes:
            if tpe is BranchType:
                # convert to the enum and refresh the schematic symbol
                setattr(self.objects[obj_idx], self.attributes[attr_idx], BranchType(value))
                self.objects[obj_idx].graphic_obj.update_symbol()
            else:
                setattr(self.objects[obj_idx], self.attributes[attr_idx], value)
        else:
            pass  # the column cannot be edited

    return True
def attr_taken(self, attr, val):
    """
    Tell whether any stored object already holds `val` in attribute `attr`.
    :param attr: attribute name
    :param val: candidate value
    :return: True if the value is already in use, else False
    """
    return any(val == getattr(obj, attr) for obj in self.objects)
def headerData(self, p_int, orientation, role):
    """
    Get the headers to display
    :param p_int: section index (row or column number)
    :param orientation: QtCore.Qt.Horizontal or QtCore.Qt.Vertical
    :param role: Qt role queried (DisplayRole and ToolTipRole handled)
    :return: header string, tooltip string, or None
    """
    if role == QtCore.Qt.DisplayRole:
        if self.transposed:
            # for the properties in the schematic view
            if orientation == QtCore.Qt.Horizontal:
                return 'Value'
            elif orientation == QtCore.Qt.Vertical:
                # attribute name, with units appended when available
                if self.units[p_int] != '':
                    return self.attributes[p_int] + ' [' + self.units[p_int] + ']'
                else:
                    return self.attributes[p_int]
        else:
            # Normal
            if orientation == QtCore.Qt.Horizontal:
                if self.units[p_int] != '':
                    return self.attributes[p_int] + ' [' + self.units[p_int] + ']'
                else:
                    return self.attributes[p_int]
            elif orientation == QtCore.Qt.Vertical:
                return str(p_int) + ':' + str(self.objects[p_int])

    # add a tooltip
    if role == QtCore.Qt.ToolTipRole:
        if p_int < self.c:
            if self.units[p_int] != "":
                unit = '\nUnits: ' + self.units[p_int]
            else:
                unit = ''
            return self.attributes[p_int] + unit + ' \n' + self.tips[p_int]
        else:
            # somehow the index is out of range
            return ""

    return None
def copy_to_column(self, index):
    """
    Copy the value pointed by the index to all the other cells in the column
    :param index: QModelIndex instance
    :return: None
    """
    value = self.data_with_type(index=index)
    col = index.column()

    for row in range(self.rowCount()):
        if self.transposed:
            obj_idx, attr_idx = col, row
        else:
            obj_idx, attr_idx = row, col

        attr = self.attributes[attr_idx]
        if attr in self.non_editable_attributes:
            continue  # read-only attribute: skip this cell
        setattr(self.objects[obj_idx], attr, value)
class BranchObjectModel(ObjectsModel):
    """
    ObjectsModel specialised for branches: carries a catalogue of branch
    type templates on top of the base model behaviour.
    """

    def __init__(self, objects, editable_headers, parent=None, editable=False,
                 non_editable_attributes=None, transposed=False, check_unique=None,
                 catalogue_dict=None):
        """
        :param objects: list of branch objects to display
        :param editable_headers: header definitions (see ObjectsModel)
        :param parent: QTableView instance
        :param editable: allow edition of the cells
        :param non_editable_attributes: attribute names to keep read-only
        :param transposed: display attributes along rows instead of columns
        :param check_unique: attribute names whose values must be unique
        :param catalogue_dict: dictionary of branch type templates
        """
        # BUGFIX: the defaults used to be mutable (list() / defaultdict()),
        # which Python evaluates once at definition time, so every instance
        # shared the same objects — and the base class mutates
        # non_editable_attributes in place (set_delegates).
        if non_editable_attributes is None:
            non_editable_attributes = list()
        if check_unique is None:
            check_unique = list()
        if catalogue_dict is None:
            catalogue_dict = defaultdict()

        # type templates catalogue
        self.catalogue_dict = catalogue_dict

        super(BranchObjectModel, self).__init__(objects, editable_headers=editable_headers, parent=parent,
                                                editable=editable,
                                                non_editable_attributes=non_editable_attributes,
                                                transposed=transposed, check_unique=check_unique)
class ObjectHistory:
    """
    Undo/redo bookkeeping: two stacks of (action_name, data) tuples.
    """

    def __init__(self, max_undo_states=100):
        """
        Constructor
        :param max_undo_states: maximum number of undo states to retain
        """
        self.max_undo_states = max_undo_states
        self.position = 0
        self.undo_stack = []
        self.redo_stack = []

    def add_state(self, action_name, data: dict):
        """
        Record an undo state.
        :param action_name: name of the action that was performed
        :param data: dictionary {column index -> profile array}
        """
        # drop the oldest entry when the stack grows past the limit
        if len(self.undo_stack) > (self.max_undo_states + 1):
            self.undo_stack.pop(0)

        self.undo_stack.append((action_name, data))
        self.position = len(self.undo_stack) - 1

    def redo(self):
        """
        Re-do: pop the newest redo state and move it back onto the undo stack.
        :return: (action_name, data) tuple
        """
        state = self.redo_stack.pop()
        self.undo_stack.append(state)
        return state

    def undo(self):
        """
        Un-do: pop the newest undo state and move it onto the redo stack.
        :return: (action_name, data) tuple
        """
        state = self.undo_stack.pop()
        self.redo_stack.append(state)
        return state

    def can_redo(self):
        """
        Is it possible to redo?
        :return: True / False
        """
        return len(self.redo_stack) > 0

    def can_undo(self):
        """
        Is it possible to undo?
        :return: True / False
        """
        return len(self.undo_stack) > 0
class ProfilesModel(QtCore.QAbstractTableModel):
    """
    Class to populate a Qt table view with profiles from objects.
    Rows are time steps, columns are devices of the requested type.
    """

    def __init__(self, multi_circuit, device_type: DeviceType, magnitude, format, parent, max_undo_states=100):
        """
        Args:
            multi_circuit: MultiCircuit instance
            device_type: string with Load, StaticGenerator, etc...
            magnitude: magnitude to display 'S', 'P', etc...
            format: cell value type (bool, float, str, complex); selects the editor delegate
            parent: Parent object: the QTableView object
            max_undo_states: depth of the undo history
        """
        QtCore.QAbstractTableModel.__init__(self, parent)

        self.parent = parent
        self.format = format  # NOTE: shadows the builtin `format`; name kept for API compatibility
        self.circuit = multi_circuit
        self.device_type = device_type
        self.magnitude = magnitude
        self.non_editable_indices = list()
        self.editable = True
        self.r = len(self.circuit.time_profile)
        self.elements = self.circuit.get_elements_by_type(device_type)
        self.c = len(self.elements)
        self.formatter = lambda x: "%.2f" % x

        # contains copies of the table
        self.history = ObjectHistory(max_undo_states)

        # add the initial state
        self.add_state(columns=range(self.columnCount()), action_name='initial')

        self.set_delegates()

    def set_delegates(self):
        """
        Set the cell editor type depending on self.format.
        :return: None
        """
        if self.format is bool:
            delegate = ComboDelegate(self.parent, [True, False], ['True', 'False'])
            self.parent.setItemDelegate(delegate)
        elif self.format is float:
            delegate = FloatDelegate(self.parent)
            self.parent.setItemDelegate(delegate)
        elif self.format is str:
            delegate = TextDelegate(self.parent)
            self.parent.setItemDelegate(delegate)
        elif self.format is complex:
            delegate = ComplexDelegate(self.parent)
            self.parent.setItemDelegate(delegate)

    def update(self):
        """
        Force attached views to re-read the model.
        """
        # row = self.rowCount()
        # self.beginInsertRows(QtCore.QModelIndex(), row, row)
        # # whatever code
        # self.endInsertRows()
        self.layoutAboutToBeChanged.emit()
        self.layoutChanged.emit()

    def flags(self, index):
        """
        Get the display mode
        :param index: QModelIndex
        :return: Qt item flags
        """
        if self.editable and index.column() not in self.non_editable_indices:
            return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        else:
            return QtCore.Qt.ItemIsEnabled

    def rowCount(self, parent=None):
        """
        Get number of rows (time steps)
        :param parent: unused (Qt API)
        :return: int
        """
        return self.r

    def columnCount(self, parent=None):
        """
        Get number of columns (devices)
        :param parent: unused (Qt API)
        :return: int
        """
        return self.c

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """
        Get the data to display
        :param index: QModelIndex
        :param role: Qt role
        :return: str for DisplayRole, otherwise None
        """
        if index.isValid():
            if role == QtCore.Qt.DisplayRole:
                profile_property = self.elements[index.column()].properties_with_profile[self.magnitude]
                array = getattr(self.elements[index.column()], profile_property)
                return str(array[index.row()])
        return None

    def setData(self, index, value, role=QtCore.Qt.DisplayRole):
        """
        Set data by simple editor (whatever text)
        :param index: QModelIndex
        :param value: new cell value
        :param role: Qt role (unused)
        :return: True (edit accepted)
        """
        c = index.column()
        r = index.row()
        if c not in self.non_editable_indices:
            profile_property = self.elements[c].properties_with_profile[self.magnitude]
            getattr(self.elements[c], profile_property)[r] = value

        # snapshot the modified column for undo
            self.add_state(columns=[c], action_name='')
        else:
            pass  # the column cannot be edited

        return True

    def headerData(self, p_int, orientation, role):
        """
        Get the headers to display
        :param p_int: section index
        :param orientation: Qt orientation
        :param role: Qt role
        :return: device name (horizontal) or time stamp / index (vertical)
        """
        if role == QtCore.Qt.DisplayRole:
            if orientation == QtCore.Qt.Horizontal:
                return str(self.elements[p_int].name)
            elif orientation == QtCore.Qt.Vertical:
                if self.circuit.time_profile is None:
                    return str(p_int)
                else:
                    return pd.to_datetime(self.circuit.time_profile[p_int]).strftime('%d-%m-%Y %H:%M')
        return None

    def paste_from_clipboard(self, row_idx=0, col_idx=0):
        """
        Paste a tab/newline separated clipboard block into the profiles,
        starting at the given cell.
        Args:
            row_idx: first destination row
            col_idx: first destination column
        """
        n = len(self.elements)
        nt = len(self.circuit.time_profile)

        if n > 0:
            profile_property = self.elements[0].properties_with_profile[self.magnitude]
            formatter = self.elements[0].editable_headers[self.magnitude].tpe

            # read the clipboard text
            cb = QtWidgets.QApplication.clipboard()
            text = cb.text(mode=cb.Clipboard)

            rows = text.split('\n')

            mod_cols = list()

            # gather values
            for r, row in enumerate(rows):
                values = row.split('\t')
                r2 = r + row_idx
                for c, val in enumerate(values):
                    c2 = c + col_idx
                    try:
                        val2 = formatter(val)
                        parsed = True
                    except Exception:
                        # BUGFIX: this was a bare `except:`, which also
                        # swallowed KeyboardInterrupt/SystemExit
                        warn("could not parse '" + str(val) + "'")
                        parsed = False
                    if parsed:
                        if c2 < n and r2 < nt:
                            mod_cols.append(c2)
                            getattr(self.elements[c2], profile_property)[r2] = val2
                        else:
                            print('Out of profile bounds')

            if len(mod_cols) > 0:
                self.add_state(mod_cols, 'paste')
        else:
            # there are no elements
            pass

    def copy_to_clipboard(self):
        """
        Copy profiles to clipboard as a tab/newline separated table with
        device names as header and time stamps as row labels.
        """
        n = len(self.elements)

        if n > 0:
            profile_property = self.elements[0].properties_with_profile[self.magnitude]

            # gather values
            names = [None] * n
            values = [None] * n
            for c in range(n):
                names[c] = self.elements[c].name
                values[c] = getattr(self.elements[c], profile_property)
            values = np.array(values).transpose().astype(str)

            # header first
            data = '\t' + '\t'.join(names) + '\n'

            # data
            for t, date in enumerate(self.circuit.time_profile):
                data += str(date) + '\t' + '\t'.join(values[t, :]) + '\n'

            # copy to clipboard
            cb = QtWidgets.QApplication.clipboard()
            cb.clear(mode=cb.Clipboard)
            cb.setText(data, mode=cb.Clipboard)
        else:
            # there are no elements
            pass

    def add_state(self, columns, action_name=''):
        """
        Compile data of an action and store the data in the undo history
        :param columns: list of column indices changed
        :param action_name: name of the action
        :return: None
        """
        data = dict()

        for col in columns:
            profile_property = self.elements[col].properties_with_profile[self.magnitude]
            # copy so later edits do not mutate the stored snapshot
            data[col] = getattr(self.elements[col], profile_property).copy()

        self.history.add_state(action_name, data)

    def restore(self, data: dict):
        """
        Set profiles data from undo history
        :param data: dictionary coming from the history {column -> array}
        :return: None
        """
        for col, array in data.items():
            profile_property = self.elements[col].properties_with_profile[self.magnitude]
            setattr(self.elements[col], profile_property, array)

    def undo(self):
        """
        Un-do table changes
        """
        if self.history.can_undo():
            action, data = self.history.undo()

            self.restore(data)
            print('Undo ', action)

            self.update()

    def redo(self):
        """
        Re-do table changes
        """
        if self.history.can_redo():
            action, data = self.history.redo()
            self.restore(data)
            print('Redo ', action)

            self.update()
class EnumModel(QtCore.QAbstractListModel):
    """
    List model that exposes a list of enumeration members to a Qt view.
    """

    def __init__(self, list_of_enums):
        """
        :param list_of_enums: list of enumeration values to show
        """
        QtCore.QAbstractListModel.__init__(self)
        self.items = list_of_enums

    def rowCount(self, parent=QtCore.QModelIndex()):
        """Number of enumeration entries."""
        return len(self.items)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Display string: the first element of the enum member's value."""
        if not index.isValid() or role != QtCore.Qt.DisplayRole:
            return None
        return self.items[index.row()].value[0]
class MeasurementsModel(QtCore.QAbstractListModel):
    """
    List model for the measurements of a circuit.
    """

    def __init__(self, circuit):
        """
        :param circuit: MultiCircuit instance
        """
        QtCore.QAbstractListModel.__init__(self)
        self.circuit = circuit
        # BUGFIX: rowCount()/data() read self.items, which was never defined
        # (copy/paste from EnumModel) and raised AttributeError on first use.
        # NOTE(review): assuming the entries live in circuit.measurements —
        # confirm against the MultiCircuit API.
        self.items = getattr(circuit, 'measurements', list())

    def rowCount(self, parent=QtCore.QModelIndex()):
        """Number of measurement entries."""
        return len(self.items)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Display string of a measurement entry."""
        if index.isValid() and role == QtCore.Qt.DisplayRole:
            return self.items[index.row()].value[0]
        return None
def get_list_model(lst, checks=False):
    """
    Build a QStandardItemModel out of a plain list.
    :param lst: iterable of values (or None for an empty model)
    :param checks: when True every entry carries a checked checkbox
    :return: QtGui.QStandardItemModel
    """
    list_model = QtGui.QStandardItemModel()

    if lst is not None:
        for val in lst:
            # one non-editable row per value
            item = QtGui.QStandardItem(str(val))
            item.setEditable(False)
            if checks:
                item.setCheckable(True)
                item.setCheckState(QtCore.Qt.Checked)
            list_model.appendRow(item)

    return list_model
def get_checked_indices(mdl: QtGui.QStandardItemModel):
    """
    Get a list of the selected (checked) row indices in a QStandardItemModel
    :param mdl: model whose items carry check states
    :return: np.ndarray of checked row indices
    """
    # BUGFIX: the annotation used to be `QtGui.QStandardItemModel()` (a call),
    # which instantiated a throw-away model every time the def was evaluated
    # and annotated the parameter with an instance instead of the class.
    idx = list()
    for row in range(mdl.rowCount()):
        item = mdl.item(row)
        if item.checkState() == QtCore.Qt.Checked:
            idx.append(row)

    return np.array(idx)
def fill_model_from_dict(parent, d, editable=False):
    """
    Fill a TreeView model from a nested dict/list structure.
    :param parent: Parent QStandardItem
    :param d: dict, list or scalar to insert under `parent`
    :param editable: whether the created items are editable
    :return: Nothing
    """
    if isinstance(d, dict):
        for k, v in d.items():
            child = QtGui.QStandardItem(str(k))
            child.setEditable(editable)
            parent.appendRow(child)
            # BUGFIX: `editable` was dropped in the recursive calls, so only
            # the first level honoured the flag
            fill_model_from_dict(child, v, editable=editable)
    elif isinstance(d, list):
        for v in d:
            fill_model_from_dict(parent, v, editable=editable)
    else:
        item = QtGui.QStandardItem(str(d))
        item.setEditable(editable)
        parent.appendRow(item)
def get_tree_model(d, top='Results'):
    """
    Build a single-column tree model from a nested structure.
    :param d: dict/list/scalar to render
    :param top: header label of the only column
    :return: QtGui.QStandardItemModel
    """
    mdl = QtGui.QStandardItemModel()
    mdl.setHorizontalHeaderLabels([top])
    fill_model_from_dict(mdl.invisibleRootItem(), d)
    return mdl
def get_tree_item_path(item: QtGui.QStandardItem):
    """
    Walk from a tree item up to the root and return the path root-first.
    :param item: QStandardItem to locate
    :return: list of item texts from the root down to `item`
    """
    path = [item.text()]
    ancestor = item.parent()
    while ancestor is not None:
        path.append(ancestor.text())
        ancestor = ancestor.parent()
    path.reverse()
    return path
subutai/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
    """
    Deprecated shim: import unmasked_index_ranges from matplotlib.cbook.

    *mask* and *compressed* are forwarded unchanged to the cbook helper.
    """
    # BUGFIX: `warnings` was never imported at module level, so calling this
    # deprecated shim raised NameError instead of emitting the warning.
    import warnings
    warnings.warn("Import this directly from matplotlib.cbook",
                  DeprecationWarning)
    # Warning added 2008/07/22
    from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
    return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
    """
    Determine which line segments of the polyline (x, y) lie within
    *radius* of the point (cx, cy).  Returns the indices of the hit
    segments (vertex hits are reported through the vertex index).
    """
    # A lone vertex cannot form a segment: just test the point distance.
    if len(x) < 2:
        res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
        return res

    # Segment start points; (dx, dy) spans each segment.
    xr, yr = x[:-1], y[:-1]
    dx, dy = x[1:] - xr, y[1:] - yr

    # Parameter u of the projection of C onto each segment's carrier line;
    # the foot of the perpendicular lies inside the segment iff 0 <= u <= 1.
    Lnorm_sq = dx ** 2 + dy ** 2  # Possibly want to eliminate Lnorm==0
    u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq
    candidates = (u >= 0) & (u <= 1)

    # Vertices within the radius: a segment adjacent to a hit vertex is
    # excluded and reported through the vertex itself, which resolves the
    # ambiguity near the joints of consecutive segments.
    point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
    candidates = candidates & ~(point_hits[:-1] | point_hits[1:])

    # Distance from C to its projection on each remaining candidate.
    px, py = xr + u * dx, yr + u * dy
    line_hits = ((cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2) & candidates

    points, = point_hits.ravel().nonzero()
    lines, = line_hits.ravel().nonzero()
    return np.concatenate((points, lines))
class Line2D(Artist):
    """
    A line - the line can have both a solid linestyle connecting all
    the vertices, and a marker at each vertex.  Additionally, the
    drawing of the solid line is influenced by the drawstyle, eg one
    can create "stepped" lines in various styles.
    """
    # linestyle string -> name of the drawing method
    lineStyles = _lineStyles =  { # hidden names deprecated
        '-'    : '_draw_solid',
        '--'   : '_draw_dashed',
        '-.'   : '_draw_dash_dot',
        ':'    : '_draw_dotted',
        'None' : '_draw_nothing',
        ' '    : '_draw_nothing',
        ''     : '_draw_nothing',
    }

    # long drawstyle names (must be matched before the short aliases)
    _drawStyles_l = {
        'default'    : '_draw_lines',
        'steps-mid'  : '_draw_steps_mid',
        'steps-pre'  : '_draw_steps_pre',
        'steps-post' : '_draw_steps_post',
    }

    # short drawstyle aliases
    _drawStyles_s = {
        'steps' : '_draw_steps_pre',
    }
    drawStyles = {}
    drawStyles.update(_drawStyles_l)
    drawStyles.update(_drawStyles_s)

    # marker identifier -> name of the marker-drawing method
    markers = _markers =  { # hidden names deprecated
        '.'  : '_draw_point',
        ','  : '_draw_pixel',
        'o'  : '_draw_circle',
        'v'  : '_draw_triangle_down',
        '^'  : '_draw_triangle_up',
        '<'  : '_draw_triangle_left',
        '>'  : '_draw_triangle_right',
        '1'  : '_draw_tri_down',
        '2'  : '_draw_tri_up',
        '3'  : '_draw_tri_left',
        '4'  : '_draw_tri_right',
        's'  : '_draw_square',
        'p'  : '_draw_pentagon',
        '*'  : '_draw_star',
        'h'  : '_draw_hexagon1',
        'H'  : '_draw_hexagon2',
        '+'  : '_draw_plus',
        'x'  : '_draw_x',
        'D'  : '_draw_diamond',
        'd'  : '_draw_thin_diamond',
        '|'  : '_draw_vline',
        '_'  : '_draw_hline',
        TICKLEFT    : '_draw_tickleft',
        TICKRIGHT   : '_draw_tickright',
        TICKUP      : '_draw_tickup',
        TICKDOWN    : '_draw_tickdown',
        CARETLEFT   : '_draw_caretleft',
        CARETRIGHT  : '_draw_caretright',
        CARETUP     : '_draw_caretup',
        CARETDOWN   : '_draw_caretdown',
        'None' : '_draw_nothing',
        ' ' : '_draw_nothing',
        '' : '_draw_nothing',
    }

    # markers whose interior is filled (affects 'auto' edge color resolution)
    filled_markers = ('o', '^', 'v', '<', '>',
                      's', 'd', 'D', 'h', 'H', 'p', '*')

    zorder = 2
    validCap = ('butt', 'round', 'projecting')
    validJoin =   ('miter', 'round', 'bevel')
def __str__(self):
    """Return a compact textual description of the line's vertices."""
    if self._label != "":
        return "Line2D(%s)"%(self._label)
    elif hasattr(self, '_x') and len(self._x) > 3:
        # BUGFIX: the second displayed vertex used to repeat index 0;
        # show the actual second point instead.
        return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
            %(self._x[0],self._y[0],self._x[1],self._y[1],self._x[-1],self._y[-1])
    elif hasattr(self, '_x'):
        return "Line2D(%s)"\
            %(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
    else:
        return "Line2D()"
def __init__(self, xdata, ydata,
             linewidth       = None, # all Nones default to rc
             linestyle       = None,
             color           = None,
             marker          = None,
             markersize      = None,
             markeredgewidth = None,
             markeredgecolor = None,
             markerfacecolor = None,
             antialiased     = None,
             dash_capstyle   = None,
             solid_capstyle  = None,
             dash_joinstyle  = None,
             solid_joinstyle = None,
             pickradius      = 5,
             drawstyle       = None,
             **kwargs
             ):
    """
    Create a :class:`~matplotlib.lines.Line2D` instance with *x*
    and *y* data in sequences *xdata*, *ydata*.

    The kwargs are :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    See :meth:`set_linestyle` for a description of the line styles,
    :meth:`set_marker` for a description of the markers, and
    :meth:`set_drawstyle` for a description of the draw styles.
    """
    Artist.__init__(self)

    #convert sequences to numpy arrays
    if not iterable(xdata):
        raise RuntimeError('xdata must be a sequence')
    if not iterable(ydata):
        raise RuntimeError('ydata must be a sequence')

    # every style argument left as None falls back to the rc default
    if linewidth is None   : linewidth=rcParams['lines.linewidth']

    if linestyle is None   : linestyle=rcParams['lines.linestyle']
    if marker is None      : marker=rcParams['lines.marker']
    if color is None       : color=rcParams['lines.color']

    if markersize is None  : markersize=rcParams['lines.markersize']
    if antialiased is None : antialiased=rcParams['lines.antialiased']
    if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
    if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
    if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
    if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']

    if drawstyle is None : drawstyle='default'

    self.set_dash_capstyle(dash_capstyle)
    self.set_dash_joinstyle(dash_joinstyle)
    self.set_solid_capstyle(solid_capstyle)
    self.set_solid_joinstyle(solid_joinstyle)

    self.set_linestyle(linestyle)
    self.set_drawstyle(drawstyle)
    self.set_linewidth(linewidth)
    self.set_color(color)
    self.set_marker(marker)
    self.set_antialiased(antialiased)
    self.set_markersize(markersize)
    self._dashSeq = None

    self.set_markerfacecolor(markerfacecolor)
    self.set_markeredgecolor(markeredgecolor)
    self.set_markeredgewidth(markeredgewidth)
    self._point_size_reduction = 0.5

    self.verticalOffset = None

    # update kwargs before updating data to give the caller a
    # chance to init axes (and hence unit support)
    self.update(kwargs)
    self.pickradius = pickradius
    # a numeric picker doubles as the pick radius
    if is_numlike(self._picker):
        self.pickradius = self._picker

    self._xorig = np.asarray([])
    self._yorig = np.asarray([])
    self._invalid = True
    self.set_data(xdata, ydata)
def contains(self, mouseevent):
    """
    Test whether the mouse event occurred on the line.  The pick
    radius determines the precision of the location test (usually
    within five points of the value).  Use
    :meth:`~matplotlib.lines.Line2D.get_pickradius` or
    :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
    modify it.

    Returns *True* if any values are within the radius along with
    ``{'ind': pointlist}``, where *pointlist* is the set of points
    within the radius.

    TODO: sort returned indices by distance
    """
    if callable(self._contains): return self._contains(self,mouseevent)

    if not is_numlike(self.pickradius):
        raise ValueError("pick radius should be a distance")

    # Make sure we have data to plot
    if self._invalid:
        self.recache()
    if len(self._xy)==0: return False,{}

    # Convert points to pixels
    path, affine = self._transformed_path.get_transformed_path_and_affine()
    path = affine.transform_path(path)
    xy = path.vertices
    xt = xy[:, 0]
    yt = xy[:, 1]

    # Convert pick radius from points to pixels
    if self.figure is None:
        # BUGFIX: this used to call the non-existent `warning.warn`,
        # raising NameError; `warnings` is imported locally because the
        # module does not import it at top level.
        import warnings
        warnings.warn('no figure set when check if mouse is on line')
        pixels = self.pickradius
    else:
        pixels = self.figure.dpi/72. * self.pickradius

    # Check for collision
    if self._linestyle in ['None',None]:
        # If no line, return the nearby point(s)
        d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
        ind, = np.nonzero(np.less_equal(d, pixels**2))
    else:
        # If line, return the nearby segment(s)
        ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)

    # (dead `if False:` debug printing block removed)

    # Return the point(s) within radius
    return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
    'return the pick radius used for containment tests'
    return self.pickradius

def setpickradius(self,d):
    """Sets the pick radius used for containment tests

    NOTE(review): the name breaks the set_* convention (cf. set_picker);
    kept as-is for backward compatibility.

    ACCEPTS: float distance in points
    """
    self.pickradius = d

def set_picker(self,p):
    """Sets the event picker details for the line.

    A callable becomes the containment test itself; a number is also
    used as the pick radius.

    ACCEPTS: float distance in points or callable pick function
    ``fn(artist, event)``
    """
    if callable(p):
        self._contains = p
    else:
        self.pickradius = p
    self._picker = p
def get_window_extent(self, renderer):
    """Return the bounding box of the line in display space, padded by
    half the marker size when markers are drawn."""
    bbox = Bbox.unit()
    bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
                             ignore=True)
    # correct for marker size, if any
    if self._marker is not None:
        ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
        bbox = bbox.padded(ms)
    return bbox

def set_axes(self, ax):
    # re-cache whenever the axes' units change
    Artist.set_axes(self, ax)
    if ax.xaxis is not None:
        self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
    if ax.yaxis is not None:
        self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
    """
    Set the x and y data

    Accepts either a single (x, y) pair or x and y as two arguments.
    The cache is invalidated only when either array is masked or the
    data identity/shape/values actually changed.

    ACCEPTS: 2D array
    """
    if len(args)==1:
        x, y = args[0]
    else:
        x, y = args

    not_masked = 0
    if not ma.isMaskedArray(x):
        x = np.asarray(x)
        not_masked += 1
    if not ma.isMaskedArray(y):
        y = np.asarray(y)
        not_masked += 1

    if (not_masked < 2 or
        (x is not self._xorig and
         (x.shape != self._xorig.shape or np.any(x != self._xorig))) or
        (y is not self._yorig and
         (y.shape != self._yorig.shape or np.any(y != self._yorig)))):
        self._xorig = x
        self._yorig = y
        self._invalid = True
def recache(self):
    """Rebuild the cached processed data (_xy, _x, _y, _path,
    _transformed_path) from the original inputs, applying unit
    conversion and broadcasting a scalar against the other axis."""
    #if self.axes is None: print 'recache no axes'
    #else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
    if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
        x = ma.asarray(self.convert_xunits(self._xorig), float)
        y = ma.asarray(self.convert_yunits(self._yorig), float)
        x = ma.ravel(x)
        y = ma.ravel(y)
    else:
        x = np.asarray(self.convert_xunits(self._xorig), float)
        y = np.asarray(self.convert_yunits(self._yorig), float)
        x = np.ravel(x)
        y = np.ravel(y)

    # broadcast a length-1 coordinate against the other one
    if len(x)==1 and len(y)>1:
        x = x * np.ones(y.shape, float)
    if len(y)==1 and len(x)>1:
        y = y * np.ones(x.shape, float)

    if len(x) != len(y):
        raise RuntimeError('xdata and ydata must be the same length')

    x = x.reshape((len(x), 1))
    y = y.reshape((len(y), 1))

    if ma.isMaskedArray(x) or ma.isMaskedArray(y):
        self._xy = ma.concatenate((x, y), 1)
    else:
        self._xy = np.concatenate((x, y), 1)
    self._x = self._xy[:, 0] # just a view
    self._y = self._xy[:, 1] # just a view

    # Masked arrays are now handled by the Path class itself
    self._path = Path(self._xy)
    self._transformed_path = TransformedPath(self._path, self.get_transform())

    self._invalid = False
def set_transform(self, t):
    """
    set the Transformation instance used by this artist

    Marks the cache invalid so recache() rebuilds the transformed path.

    ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
    """
    Artist.set_transform(self, t)
    self._invalid = True
    # self._transformed_path = TransformedPath(self._path, self.get_transform())

def _is_sorted(self, x):
    "return true if x is sorted"
    if len(x)<2: return 1
    # true iff all consecutive differences are non-negative
    return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
    """Draw the line, then its markers (if any), with the given renderer."""
    if self._invalid:
        self.recache()

    renderer.open_group('line2d')

    if not self._visible: return
    gc = renderer.new_gc()
    self._set_gc_clip(gc)

    gc.set_foreground(self._color)
    gc.set_antialiased(self._antialiased)
    gc.set_linewidth(self._linewidth)
    gc.set_alpha(self._alpha)
    # dashed and solid lines use separate cap/join styles
    if self.is_dashed():
        cap = self._dashcapstyle
        join = self._dashjoinstyle
    else:
        cap = self._solidcapstyle
        join = self._solidjoinstyle
    gc.set_joinstyle(join)
    gc.set_capstyle(cap)
    gc.set_snap(self.get_snap())

    funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
    if funcname != '_draw_nothing':
        tpath, affine = self._transformed_path.get_transformed_path_and_affine()
        # the drawstyle function calls back into self._lineFunc
        self._lineFunc = getattr(self, funcname)
        funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
        drawFunc = getattr(self, funcname)
        drawFunc(renderer, gc, tpath, affine.frozen())

    # markers get a fresh graphics context (edge color/width differ)
    if self._marker is not None:
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_foreground(self.get_markeredgecolor())
        gc.set_linewidth(self._markeredgewidth)
        gc.set_alpha(self._alpha)
        funcname = self._markers.get(self._marker, '_draw_nothing')
        if funcname != '_draw_nothing':
            tpath, affine = self._transformed_path.get_transformed_points_and_affine()
            markerFunc = getattr(self, funcname)
            markerFunc(renderer, gc, tpath, affine.frozen())

    renderer.close_group('line2d')
# Simple accessors for the corresponding line properties.
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
    """Return the marker edge color: 'auto' resolves to black for
    filled markers and to the line color otherwise."""
    # BUGFIX: removed an unreachable duplicate `return self._markeredgecolor`
    # that followed the if/else (both branches already return).
    if (is_string_like(self._markeredgecolor) and
        self._markeredgecolor == 'auto'):
        if self._marker in self.filled_markers:
            return 'k'
        else:
            return self._color
    else:
        return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth

def get_markerfacecolor(self):
    # None / 'none' mean "no fill" and are returned verbatim;
    # 'auto' resolves to the line color
    if (self._markerfacecolor is None or
        (is_string_like(self._markerfacecolor) and
         self._markerfacecolor.lower()=='none') ):
        return self._markerfacecolor
    elif (is_string_like(self._markerfacecolor) and
          self._markerfacecolor.lower() == 'auto'):
        return self._color
    else:
        return self._markerfacecolor

def get_markersize(self): return self._markersize
def get_data(self, orig=True):
    """
    Return the xdata, ydata.

    If *orig* is *True*, return the original data
    """
    return self.get_xdata(orig=orig), self.get_ydata(orig=orig)

def get_xdata(self, orig=True):
    """
    Return the xdata.

    If *orig* is *True*, return the original data, else the
    processed data.
    """
    if orig:
        return self._xorig
    if self._invalid:
        self.recache()
    return self._x

def get_ydata(self, orig=True):
    """
    Return the ydata.

    If *orig* is *True*, return the original data, else the
    processed data.
    """
    if orig:
        return self._yorig
    if self._invalid:
        self.recache()
    return self._y

def get_path(self):
    """
    Return the :class:`~matplotlib.path.Path` object associated
    with this line.
    """
    if self._invalid:
        self.recache()
    return self._path

def get_xydata(self):
    """
    Return the *xy* data as a Nx2 numpy array.
    """
    if self._invalid:
        self.recache()
    return self._xy
def set_antialiased(self, b):
    """
    True if line should be drawn with antialiased rendering

    ACCEPTS: [True | False]
    """
    self._antialiased = b

def set_color(self, color):
    """
    Set the color of the line

    ACCEPTS: any matplotlib color
    """
    self._color = color

def set_drawstyle(self, drawstyle):
    """
    Set the drawstyle of the plot

    'default' connects the points with lines. The steps variants
    produce step-plots. 'steps' is equivalent to 'steps-pre' and
    is maintained for backward-compatibility.

    ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
    """
    self._drawstyle = drawstyle

def set_linewidth(self, w):
    """
    Set the line width in points

    ACCEPTS: float value in points
    """
    self._linewidth = w
def set_linestyle(self, linestyle):
    """
    Set the linestyle of the line (also accepts drawstyles)


    ================    =================
    linestyle           description
    ================    =================
    '-'                 solid
    '--'                dashed
    '-.'                dash_dot
    ':'                 dotted
    'None'              draw nothing
    ' '                 draw nothing
    ''                  draw nothing
    ================    =================

    'steps' is equivalent to 'steps-pre' and is maintained for
    backward-compatibility.

    .. seealso::
        :meth:`set_drawstyle`

    ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ]  and
    any drawstyle in combination with a linestyle, e.g. 'steps--'.
    """
    # handle long drawstyle names before short ones !
    for ds in flatten([k.keys() for k in (self._drawStyles_l,
            self._drawStyles_s)], is_string_like):
        if linestyle.startswith(ds):
            self.set_drawstyle(ds)
            # strip the drawstyle prefix; a bare drawstyle implies '-'
            if len(linestyle) > len(ds):
                linestyle = linestyle[len(ds):]
            else:
                linestyle = '-'

    if linestyle not in self._lineStyles:
        # map long style names (e.g. 'solid') onto their symbols
        if linestyle in ls_mapper:
            linestyle = ls_mapper[linestyle]
        else:
            verbose.report('Unrecognized line style %s, %s' %
                                            (linestyle, type(linestyle)))
    if linestyle in [' ','']:
        linestyle = 'None'
    self._linestyle = linestyle
def set_marker(self, marker):
    """
    Set the line marker

    ==========  ==========================
    marker      description
    ==========  ==========================
    '.'         point
    ','         pixel
    'o'         circle
    'v'         triangle_down
    '^'         triangle_up
    '<'         triangle_left
    '>'         triangle_right
    '1'         tri_down
    '2'         tri_up
    '3'         tri_left
    '4'         tri_right
    's'         square
    'p'         pentagon
    '*'         star
    'h'         hexagon1
    'H'         hexagon2
    '+'         plus
    'x'         x
    'D'         diamond
    'd'         thin_diamond
    '|'         vline
    '_'         hline
    TICKLEFT    tickleft
    TICKRIGHT   tickright
    TICKUP      tickup
    TICKDOWN    tickdown
    CARETLEFT   caretleft
    CARETRIGHT  caretright
    CARETUP     caretup
    CARETDOWN   caretdown
    'None'      nothing
    ' '         nothing
    ''          nothing
    ==========  ==========================

    ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
             | '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
             | 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
             | TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
             | 'None' | ' ' | '' ]

    """
    if marker not in self._markers:
        verbose.report('Unrecognized marker style %s, %s' %
                                        (marker, type(marker)))
    if marker in [' ','']:
        marker = 'None'
    self._marker = marker
    # cache the bound drawing method for this marker
    self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
    def set_markersize(self, sz):
        """
        Set the marker size in points

        ACCEPTS: float
        """
        # No validation here: the renderer converts points to pixels at
        # draw time (see the _draw_* marker methods).
        self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
    def _draw_lines(self, renderer, gc, path, trans):
        # Delegate to the currently-bound line drawing routine (one of
        # the _draw_solid/_draw_dashed/... methods below).
        self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
    def _draw_nothing(self, *args, **kwargs):
        # Shared no-op routine used for the 'None'/' '/'' styles
        # ("draw nothing" in the style tables above).
        pass
    def _draw_solid(self, renderer, gc, path, trans):
        # Switch the GC to solid strokes before drawing the path.
        gc.set_linestyle('solid')
        renderer.draw_path(gc, path, trans)
    def _draw_dashed(self, renderer, gc, path, trans):
        gc.set_linestyle('dashed')
        # A custom on/off sequence from set_dashes() overrides the GC's
        # default dash pattern; the dash offset is fixed at 0.
        if self._dashSeq is not None:
            gc.set_dashes(0, self._dashSeq)
        renderer.draw_path(gc, path, trans)
    def _draw_dash_dot(self, renderer, gc, path, trans):
        # Pattern comes entirely from the GC's 'dashdot' style; custom
        # dash sequences do not apply here (compare _draw_dashed).
        gc.set_linestyle('dashdot')
        renderer.draw_path(gc, path, trans)
    def _draw_dotted(self, renderer, gc, path, trans):
        # Pattern comes entirely from the GC's 'dotted' style.
        gc.set_linestyle('dotted')
        renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
    # Short convenience aliases for the setters/getters above.
    def set_aa(self, val):
        """Alias for :meth:`set_antialiased`."""
        self.set_antialiased(val)
    def set_c(self, val):
        """Alias for :meth:`set_color`."""
        self.set_color(val)
    def set_ls(self, val):
        """Alias for :meth:`set_linestyle`."""
        self.set_linestyle(val)
    def set_lw(self, val):
        """Alias for :meth:`set_linewidth`."""
        self.set_linewidth(val)
    def set_mec(self, val):
        """Alias for :meth:`set_markeredgecolor`."""
        self.set_markeredgecolor(val)
    def set_mew(self, val):
        """Alias for :meth:`set_markeredgewidth`."""
        self.set_markeredgewidth(val)
    def set_mfc(self, val):
        """Alias for :meth:`set_markerfacecolor`."""
        self.set_markerfacecolor(val)
    def set_ms(self, val):
        """Alias for :meth:`set_markersize`."""
        self.set_markersize(val)
    def get_aa(self):
        """Alias for :meth:`get_antialiased`."""
        return self.get_antialiased()
    def get_c(self):
        """Alias for :meth:`get_color`."""
        return self.get_color()
    def get_ls(self):
        """Alias for :meth:`get_linestyle`."""
        return self.get_linestyle()
    def get_lw(self):
        """Alias for :meth:`get_linewidth`."""
        return self.get_linewidth()
    def get_mec(self):
        """Alias for :meth:`get_markeredgecolor`."""
        return self.get_markeredgecolor()
    def get_mew(self):
        """Alias for :meth:`get_markeredgewidth`."""
        return self.get_markeredgewidth()
    def get_mfc(self):
        """Alias for :meth:`get_markerfacecolor`."""
        return self.get_markerfacecolor()
    def get_ms(self):
        """Alias for :meth:`get_markersize`."""
        return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
    def get_dash_joinstyle(self):
        """
        Get the join style for dashed linestyles

        Returns one of 'miter' | 'round' | 'bevel'.
        """
        return self._dashjoinstyle
    def get_solid_joinstyle(self):
        """
        Get the join style for solid linestyles

        Returns one of 'miter' | 'round' | 'bevel'.
        """
        return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
    def get_dash_capstyle(self):
        """
        Get the cap style for dashed linestyles

        Returns one of 'butt' | 'round' | 'projecting'.
        """
        return self._dashcapstyle
    def get_solid_capstyle(self):
        """
        Get the cap style for solid linestyles

        Returns one of 'butt' | 'round' | 'projecting'.
        """
        return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
    """
    Manage the callbacks to maintain a list of selected vertices for
    :class:`matplotlib.lines.Line2D`. Derived classes should override
    :meth:`~matplotlib.lines.VertexSelector.process_selected` to do
    something with the picks.
    Here is an example which highlights the selected verts with red
    circles::
        import numpy as np
        import matplotlib.pyplot as plt
        import matplotlib.lines as lines
        class HighlightSelected(lines.VertexSelector):
            def __init__(self, line, fmt='ro', **kwargs):
                lines.VertexSelector.__init__(self, line)
                self.markers, = self.axes.plot([], [], fmt, **kwargs)
            def process_selected(self, ind, xs, ys):
                self.markers.set_data(xs, ys)
                self.canvas.draw()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        x, y = np.random.rand(2, 30)
        line, = ax.plot(x, y, 'bs-', picker=5)
        selector = HighlightSelected(line)
        plt.show()
    """
    def __init__(self, line):
        """
        Initialize the class with a :class:`matplotlib.lines.Line2D`
        instance. The line should already be added to some
        :class:`matplotlib.axes.Axes` instance and should have the
        picker property set.

        Raises RuntimeError if the line has no axes or no picker.
        """
        if not hasattr(line, 'axes'):
            raise RuntimeError('You must first add the line to the Axes')
        if line.get_picker() is None:
            raise RuntimeError('You must first set the picker property of the line')
        self.axes = line.axes
        self.line = line
        self.canvas = self.axes.figure.canvas
        # Register for pick events on the line's canvas; self.cid can be
        # used later to disconnect the callback.
        self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
        # Set of currently-selected vertex indices.
        self.ind = set()
    def process_selected(self, ind, xs, ys):
        """
        Default "do nothing" implementation of the
        :meth:`process_selected` method.
        *ind* are the indices of the selected vertices. *xs* and *ys*
        are the coordinates of the selected vertices.
        """
        pass
    def onpick(self, event):
        'When the line is picked, update the set of selected indicies.'
        # Ignore pick events originating from other artists.
        if event.artist is not self.line: return
        # Toggle membership: picking an already-selected vertex
        # deselects it.
        for i in event.ind:
            if i in self.ind:
                self.ind.remove(i)
            else:
                self.ind.add(i)
        # Hand the subclass a sorted index list plus the corresponding
        # data coordinates.
        ind = list(self.ind)
        ind.sort()
        xdata, ydata = self.line.get_data()
        self.process_selected(ind, xdata[ind], ydata[ind])
# Public module-level aliases for the Line2D style tables.
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
# Register the Line2D kwarg documentation for the artist kwdoc machinery.
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
# NOTE(review): im_func is Python-2-only; on Python 3 this attribute is
# spelled __func__.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
seanli9jan/tensorflow | tensorflow/python/kernel_tests/constant_op_eager_test.py | 33 | 21448 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
  """Eager-mode tests for constant_op.constant / ops.convert_to_tensor."""

  def _testCpu(self, x):
    # Convert on the CPU device and compare against numpy; float and
    # complex dtypes use approximate comparison, everything else exact.
    np_ans = np.array(x)
    with context.device("/device:CPU:0"):
      tf_ans = ops.convert_to_tensor(x).numpy()
    if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
      self.assertAllClose(np_ans, tf_ans)
    else:
      self.assertAllEqual(np_ans, tf_ans)
  def _testGpu(self, x):
    # Same check on the GPU device; silently skipped when no GPU exists.
    device = test_util.gpu_device_name()
    if device:
      np_ans = np.array(x)
      with context.device(device):
        tf_ans = ops.convert_to_tensor(x).numpy()
      if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
        self.assertAllClose(np_ans, tf_ans)
      else:
        self.assertAllEqual(np_ans, tf_ans)
  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)
  def testFloat(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(np.empty((2, 0, 5)).astype(np.float32))
    orig = [-1.0, 2.0, 0.0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
    # Mix floats and ints
    orig = [-1.5, 2, 0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
    orig = [-5, 2.5, 0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
    # Mix floats and ints that don't fit in int32
    orig = [1, 2**42, 0.5]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
  def testDouble(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(np.empty((2, 0, 5)).astype(np.float64))
    orig = [-5, 2.5, 0]
    tf_ans = constant_op.constant(orig, dtypes_lib.float64)
    self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
    # This integer is not exactly representable as a double, gets rounded.
    tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
    self.assertEqual(2**54, tf_ans.numpy())
    # This integer is larger than all non-infinite numbers representable
    # by a double, raises an exception.
    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
      constant_op.constant(10**310, dtypes_lib.float64)
  def testInt32(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(np.empty((2, 0, 5)).astype(np.int32))
    self._testAll([-1, 2])
  def testInt64(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(np.empty((2, 0, 5)).astype(np.int64))
    # Should detect out of range for int32 and use int64 instead.
    orig = [2, 2**48, -2**48]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())
    # Out of range for an int64
    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
      constant_op.constant([2**72])
  def testComplex64(self):
    # NOTE: the builtin complex() replaces the np.complex alias, which
    # was deprecated in NumPy 1.20 and removed in 1.24; the value is
    # identical.
    self._testAll(
        complex(1, 2) *
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
    self._testAll(
        complex(1, 2) *
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
  def testComplex128(self):
    # See testComplex64 for the np.complex -> complex change.
    self._testAll(
        complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
                                                   ]).astype(np.complex128))
    self._testAll(
        complex(1, 2) * np.random.normal(size=30).reshape(
            [2, 3, 5]).astype(np.complex128))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
  def testString(self):
    val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
    self._testCpu(np.array(val).reshape([2, 3, 5]))
    self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
  def testStringWithNulls(self):
    val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
    self.assertEqual(len(val), 4)
    self.assertEqual(val, b"\0\0\0\0")
    val = ops.convert_to_tensor(b"xx\0xx").numpy()
    self.assertEqual(len(val), 5)
    self.assertAllEqual(val, b"xx\0xx")
    nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
    val = ops.convert_to_tensor(nested).numpy()
    # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
    # numpy array, which loses the null terminators.
    self.assertEqual(val.tolist(), nested)
  def testExplicitShapeNumPy(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
        shape=[2, 3, 5])
    self.assertEqual(c.get_shape(), [2, 3, 5])
  def testImplicitShapeNumPy(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self.assertEqual(c.get_shape(), [2, 3, 5])
  def testExplicitShapeList(self):
    c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
    self.assertEqual(c.get_shape(), [7])
  def testExplicitShapeFill(self):
    c = constant_op.constant(12, shape=[7])
    self.assertEqual(c.get_shape(), [7])
    self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
  def testExplicitShapeReshape(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
        shape=[5, 2, 3])
    self.assertEqual(c.get_shape(), [5, 2, 3])
  def testImplicitShapeList(self):
    c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
    self.assertEqual(c.get_shape(), [7])
  def testExplicitShapeNumber(self):
    c = constant_op.constant(1, shape=[1])
    self.assertEqual(c.get_shape(), [1])
  def testImplicitShapeNumber(self):
    c = constant_op.constant(1)
    self.assertEqual(c.get_shape(), [])
  def testShapeTooBig(self):
    with self.assertRaises(TypeError):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
  def testShapeTooSmall(self):
    with self.assertRaises(TypeError):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
  def testShapeWrong(self):
    with self.assertRaisesRegexp(TypeError, None):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
  def testShape(self):
    self._testAll(constant_op.constant([1]).get_shape())
  def testDimension(self):
    x = constant_op.constant([1]).shape[0]
    self._testAll(x)
  def testDimensionList(self):
    x = [constant_op.constant([1]).shape[0]]
    self._testAll(x)
    # Mixing with regular integers is fine too
    self._testAll([1] + x)
    self._testAll(x + [1])
  def testDimensionTuple(self):
    x = constant_op.constant([1]).shape[0]
    self._testAll((x,))
    self._testAll((1, x))
    self._testAll((x, 1))
  def testInvalidLength(self):
    class BadList(list):
      def __init__(self):
        super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned
      def __len__(self):
        return -1
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList()])
    with self.assertRaisesRegexp(ValueError, "mixed types"):
      constant_op.constant([1, 2, BadList()])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant(BadList())
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([[BadList(), 2], 3])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList(), [1, 2, 3]])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList(), []])
    # TODO(allenl, josh11b): These cases should return exceptions rather than
    # working (currently shape checking only checks the first element of each
    # sequence recursively). Maybe the first one is fine, but the second one
    # silently truncating is rather bad.
    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
    #   constant_op.constant([[3, 2, 1], BadList()])
    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
    #   constant_op.constant([[], BadList()])
  def testSparseValuesRaiseErrors(self):
    with self.assertRaisesRegexp(ValueError, "non-rectangular Python sequence"):
      constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
    with self.assertRaisesRegexp(ValueError, None):
      constant_op.constant([[1, 2], [3]])
    with self.assertRaisesRegexp(ValueError, None):
      constant_op.constant([[1, 2], [3], [4, 5]])
  # TODO(ashankar): This test fails with graph construction since
  # tensor_util.make_tensor_proto (invoked from constant_op.constant)
  # does not handle iterables (it relies on numpy conversion).
  # For consistency, should graph construction handle Python objects
  # that implement the sequence protocol (but not numpy conversion),
  # or should eager execution fail on such sequences?
  def testCustomSequence(self):
    # This is inspired by how many objects in pandas are implemented:
    # - They implement the Python sequence protocol
    # - But may raise a KeyError on __getitem__(self, 0)
    # See https://github.com/tensorflow/tensorflow/issues/20347
    class MySeq(object):
      def __getitem__(self, key):
        if key != 1 and key != 3:
          raise KeyError(key)
        return key
      def __len__(self):
        return 2
      def __iter__(self):
        l = list([1, 3])
        return l.__iter__()
    self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
  """Tests for ops.convert_to_tensor in eager mode."""

  def testAsTensorForTensorInput(self):
    """Converting an existing tensor is a no-op returning it unchanged."""
    original = constant_op.constant(10.0)
    converted = ops.convert_to_tensor(original)
    self.assertIs(original, converted)
  def testAsTensorForNonTensorInput(self):
    """Converting a Python scalar produces an EagerTensor."""
    result = ops.convert_to_tensor(10.0)
    self.assertTrue(isinstance(result, ops.EagerTensor))
class ZerosTest(test.TestCase):
  """Eager-mode tests for array_ops.zeros."""

  def _Zeros(self, shape):
    # Helper: build a zeros tensor, verify its static shape, and return
    # the numpy value.
    ret = array_ops.zeros(shape)
    self.assertEqual(shape, ret.get_shape())
    return ret.numpy()
  def testConst(self):
    self.assertTrue(
        np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
  def testScalar(self):
    # Both [] and () describe a rank-0 tensor; so does an empty int32
    # shape tensor.
    self.assertEqual(0, self._Zeros([]))
    self.assertEqual(0, self._Zeros(()))
    scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
    self.assertEqual(0, scalar.numpy())
  def testDynamicSizes(self):
    np_ans = np.array([[0] * 3] * 2)
    # Creates a tensor of 2 x 3.
    d = array_ops.fill([2, 3], 12., name="fill")
    # Constructs a tensor of zeros of the same dimensions as "d".
    z = array_ops.zeros(array_ops.shape(d))
    out = z.numpy()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, d)
    self.assertShapeEqual(np_ans, z)
  def testDtype(self):
    d = array_ops.fill([2, 3], 12., name="fill")
    self.assertEqual(d.get_shape(), [2, 3])
    # Test default type for both constant size and dynamic size
    z = array_ops.zeros([2, 3])
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
    z = array_ops.zeros(array_ops.shape(d))
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
    # Test explicit type control
    for dtype in [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
        dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
        dtypes_lib.bool,
        # TODO(josh11b): Support string type here.
        # dtypes_lib.string
    ]:
      z = array_ops.zeros([2, 3], dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      z_value = z.numpy()
      # All-zero value regardless of dtype.
      self.assertFalse(np.any(z_value))
      self.assertEqual((2, 3), z_value.shape)
      z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      z_value = z.numpy()
      self.assertFalse(np.any(z_value))
      self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
    """Checks for array_ops.zeros_like."""

    def _compareZeros(self, dtype, use_gpu):
        """Check zeros_like preserves shape/dtype of a nonzero 2x3 source."""
        # NOTE(kearnes): The default numpy dtype for tf.string is np.object
        # (and can't be changed without breaking a lot of things), which makes
        # constant_op.constant below raise TypeError, so tf.string is
        # special-cased here.
        if dtype == dtypes_lib.string:
            numpy_dtype = np.string_
        else:
            numpy_dtype = dtype.as_numpy_dtype
        source = constant_op.constant(
            np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
        zeros_t = array_ops.zeros_like(source)
        self.assertEqual(zeros_t.dtype, dtype)         # dtype preserved
        self.assertEqual([2, 3], zeros_t.get_shape())  # shape preserved
        values = zeros_t.numpy()
        self.assertFalse(np.any(values))               # every element is zero
        self.assertEqual((2, 3), values.shape)

    def testZerosLikeCPU(self):
        # TODO(josh11b): add dtypes_lib.string here once supported.
        for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                      dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
                      dtypes_lib.complex64, dtypes_lib.complex128,
                      dtypes_lib.int64):
            self._compareZeros(dtype, use_gpu=False)

    def testZerosLikeGPU(self):
        # TODO(josh11b): add dtypes_lib.string here once supported.
        for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                      dtypes_lib.bool, dtypes_lib.int64):
            self._compareZeros(dtype, use_gpu=True)

    def testZerosLikeDtype(self):
        # zeros_like must honor an explicit target dtype even between dtypes
        # that cannot be cast into one another.
        shape = (3, 5)
        for in_type in (np.float32, np.complex64):
            source = np.arange(15).astype(in_type).reshape(*shape)
            for out_type in (np.float32, np.complex64):
                result = array_ops.zeros_like(source, dtype=out_type).numpy()
                self.assertEqual(result.dtype, out_type)
                self.assertEqual(result.shape, shape)
                self.assertAllEqual(result, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
    """Checks for array_ops.ones."""

    def _Ones(self, shape):
        """Build a ones tensor, check its static shape, return its numpy value."""
        result = array_ops.ones(shape)
        self.assertEqual(shape, result.get_shape())
        return result.numpy()

    def testConst(self):
        self.assertTrue(
            np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))

    def testScalar(self):
        self.assertEqual(1, self._Ones([]))
        self.assertEqual(1, self._Ones(()))
        scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
        self.assertEqual(1, scalar.numpy())

    def testDynamicSizes(self):
        expected = np.array([[1] * 3] * 2)
        # A 2 x 3 tensor whose runtime shape seeds the ones op below.
        filled = array_ops.fill([2, 3], 12., name="fill")
        ones_t = array_ops.ones(array_ops.shape(filled))
        self.assertAllEqual(expected, ones_t.numpy())
        self.assertShapeEqual(expected, filled)
        self.assertShapeEqual(expected, ones_t)

    def testDtype(self):
        filled = array_ops.fill([2, 3], 12., name="fill")
        self.assertEqual(filled.get_shape(), [2, 3])
        # Default dtype, static shape.
        ones_t = array_ops.ones([2, 3])
        self.assertEqual(ones_t.dtype, dtypes_lib.float32)
        self.assertEqual([2, 3], ones_t.get_shape())
        self.assertAllEqual(ones_t.numpy(), np.ones([2, 3]))
        # Default dtype, dynamic shape.
        ones_t = array_ops.ones(array_ops.shape(filled))
        self.assertEqual(ones_t.dtype, dtypes_lib.float32)
        self.assertEqual([2, 3], ones_t.get_shape())
        self.assertAllEqual(ones_t.numpy(), np.ones([2, 3]))
        # Explicit dtype control, for both static and dynamic shapes.
        for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                      dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
                      dtypes_lib.complex64, dtypes_lib.complex128,
                      dtypes_lib.int64, dtypes_lib.bool):
            for shape_arg in ([2, 3], array_ops.shape(filled)):
                ones_t = array_ops.ones(shape_arg, dtype=dtype)
                self.assertEqual(ones_t.dtype, dtype)
                self.assertEqual([2, 3], ones_t.get_shape())
                self.assertAllEqual(ones_t.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
    """Checks for array_ops.ones_like."""

    def testOnesLike(self):
        for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                      dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
                      dtypes_lib.complex64, dtypes_lib.complex128,
                      dtypes_lib.int64):
            numpy_dtype = dtype.as_numpy_dtype
            # A 2 x 3 source tensor of nonzero values.
            source = constant_op.constant(
                np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
            # ones_like must preserve dtype and shape and yield all ones.
            ones_t = array_ops.ones_like(source)
            self.assertEqual(ones_t.dtype, dtype)
            values = ones_t.numpy()
            self.assertTrue(np.array_equal(values, np.array([[1] * 3] * 2)))
            self.assertEqual([2, 3], ones_t.get_shape())
class FillTest(test.TestCase):
    """Checks for array_ops.fill."""

    def _compare(self, dims, val, np_ans, use_gpu):
        """Run fill(dims, val) on the requested device and compare to np_ans."""
        ctx = context.context()
        # Fall back to CPU when no GPU is available.
        device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
        with ops.device(device):
            tf_ans = array_ops.fill(dims, val, name="fill")
            out = tf_ans.numpy()
        self.assertAllClose(np_ans, out)

    def _compareAll(self, dims, val, np_ans):
        """Compare on CPU and (when present) GPU."""
        self._compare(dims, val, np_ans, False)
        self._compare(dims, val, np_ans, True)

    def testFillFloat(self):
        np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
        self._compareAll([2, 3], np_ans[0][0], np_ans)

    def testFillDouble(self):
        np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
        self._compareAll([2, 3], np_ans[0][0], np_ans)

    def testFillInt32(self):
        np_ans = np.array([[42] * 3] * 2).astype(np.int32)
        self._compareAll([2, 3], np_ans[0][0], np_ans)

    def testFillInt64(self):
        np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
        self._compareAll([2, 3], np_ans[0][0], np_ans)

    def testFillComplex64(self):
        np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
        self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

    def testFillComplex128(self):
        np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
        self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

    def testFillString(self):
        np_ans = np.array([[b"yolo"] * 3] * 2)
        tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
        self.assertAllEqual(np_ans, tf_ans)

    def testFillNegative(self):
        # BUG FIX: the last two entries were written as (-2) and (-3), which
        # are plain ints rather than one-element tuples, so those cases passed
        # a scalar instead of a negative shape vector to fill(). Written as
        # proper tuples, every case now exercises a negative dimension.
        for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
            with self.assertRaises(errors_impl.InvalidArgumentError):
                array_ops.fill(shape, 7)

    def testShapeFunctionEdgeCases(self):
        # The dims argument must be a 1-D vector.
        with self.assertRaises(errors_impl.InvalidArgumentError):
            array_ops.fill([[0, 1], [2, 3]], 1.0)
        # The fill value must be a scalar.
        with self.assertRaises(errors_impl.InvalidArgumentError):
            array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
| apache-2.0 |
erykoff/redmapper | tests/test_centeringcal.py | 1 | 2880 | import matplotlib
matplotlib.use('Agg')
import unittest
import os
import shutil
import numpy.testing as testing
import numpy as np
import fitsio
import tempfile
from numpy import random
from redmapper.configuration import Configuration
from redmapper.calibration import WcenCalibrator
class CenteringCalibratorTestCase(unittest.TestCase):
    """
    Test the centering model calibrator code (WcenCalibrator).
    """

    def test_centeringcal(self):
        """
        Test WcenCalibrator.

        Runs the calibration on canned DR8 random catalogs and checks the
        fitted parameters against pinned reference values.
        """
        file_path = 'data_for_tests'
        conf_filename = 'testconfig_wcen.yaml'
        config = Configuration(os.path.join(file_path, conf_filename))
        # Canned random catalogs used as calibration inputs.
        randcatfile = os.path.join(file_path, 'test_dr8_randcat.fit')
        randsatcatfile = os.path.join(file_path, 'test_dr8_randsatcat.fit')
        # Scratch output directory; removed in tearDown().
        self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestRedmapper-')
        config.outpath = self.test_dir
        config.wcenfile = os.path.join(config.outpath, '%s_testwcen.fit' % (config.d.outbase))
        # Fixed seed for repeatability: every numeric assertion below pins an
        # exact value that depends on it.
        random.seed(seed=1000)
        wc = WcenCalibrator(config, 1, randcatfile=randcatfile, randsatcatfile=randsatcatfile)
        wc.run(testing=True)
        # Check outputs: first, the schechter monte carlo fit.
        # These are very approximate reference numbers, guarding against any
        # unexpected change in behavior.
        testing.assert_almost_equal(wc.phi1_mmstar_m, -1.13451727, 5)
        testing.assert_almost_equal(wc.phi1_mmstar_slope, -0.37794289, 5)
        testing.assert_almost_equal(wc.phi1_msig_m, 0.49644922, 5)
        testing.assert_almost_equal(wc.phi1_msig_slope, -0.13314551, 5)
        # Make sure the output file was written.
        self.assertTrue(os.path.isfile(config.wcenfile))
        # Round-trip the calibrated values through the config object.
        vals = config._wcen_vals()
        config._set_vars_from_dict(vals)
        testing.assert_almost_equal(config.wcen_Delta0, -1.41107068614, 4)
        testing.assert_almost_equal(config.wcen_Delta1, -0.324385870342, 4)
        testing.assert_almost_equal(config.wcen_sigma_m, 0.3654790486, 4)
        testing.assert_almost_equal(config.wcen_pivot, 30.0, 4)
        testing.assert_almost_equal(config.lnw_fg_mean, -0.269133136174, 4)
        testing.assert_almost_equal(config.lnw_fg_sigma, 0.295286695144, 4)
        testing.assert_almost_equal(config.lnw_sat_mean, 0.0273435015217, 4)
        testing.assert_almost_equal(config.lnw_sat_sigma, 0.273395687497, 4)
        testing.assert_almost_equal(config.lnw_cen_mean, 0.219192726372, 4)
        testing.assert_almost_equal(config.lnw_cen_sigma, 0.136759762564, 4)

    def setUp(self):
        # Sentinel so tearDown() can tell whether a scratch dir was created.
        self.test_dir = None

    def tearDown(self):
        # Best-effort cleanup of the scratch directory (ignore_errors=True).
        if self.test_dir is not None:
            if os.path.exists(self.test_dir):
                shutil.rmtree(self.test_dir, True)
if __name__=='__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
ajrichards/notebook | deep-learning/draw_simple_nn.py | 2 | 3075 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
    '''
    Draw a feed-forward neural-network cartoon using matplotlib.

    :usage:
        >>> fig = plt.figure(figsize=(12, 12))
        >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])

    :parameters:
        - ax : matplotlib.axes.AxesSubplot
            The axes on which to plot the cartoon (get e.g. by plt.gca())
        - left, right, bottom, top : float
            Bounding box (axes coordinates) for the node centers
        - layer_sizes : list of int
            List of layer sizes, including input and output dimensionality
    '''
    n_layers = len(layer_sizes)  # unused in original; kept for parity
    v_spacing = (top - bottom) / float(max(layer_sizes))
    h_spacing = (right - left) / float(len(layer_sizes) - 1)

    def layer_top(size):
        # Vertical position of a layer's topmost node so it is centered.
        return v_spacing * (size - 1) / 2. + (top + bottom) / 2.

    # --- nodes (circle patches, colored by layer role) -------------------
    legend_patches = {}
    for layer_idx, size in enumerate(layer_sizes):
        if layer_idx == 0:
            color, label = 'darkorange', 'input'
        elif layer_idx == len(layer_sizes) - 1:
            color, label = 'dodgerblue', 'output'
        else:
            color, label = 'mediumpurple', 'hidden'
        for unit_idx in range(size):
            center = (layer_idx * h_spacing + left,
                      layer_top(size) - unit_idx * v_spacing)
            patch = mpatches.Circle(center, v_spacing / 3.5, ec='k', fc=color)
            # The last patch of each role is reused for the legend.
            legend_patches[label] = patch
            ax.add_patch(patch)

    # --- edges (arrow annotations between adjacent layers) ---------------
    for layer_idx, (size_a, size_b) in enumerate(
            zip(layer_sizes[:-1], layer_sizes[1:])):
        top_a = layer_top(size_a)
        top_b = layer_top(size_b)
        for src in range(size_a):
            for dst in range(size_b):
                x_a = layer_idx * h_spacing + left
                x_b = (layer_idx + 1) * h_spacing + left
                y_a = top_a - src * v_spacing
                y_b = top_b - dst * v_spacing
                ax.annotate('', xy=(x_b, y_b), xycoords='data',
                            xytext=(x_a, y_a), textcoords='data',
                            arrowprops=dict(facecolor='black',
                                            arrowstyle='->',
                                            shrinkA=18,
                                            shrinkB=18,
                                            )
                            )

    ax.legend(legend_patches.values(), legend_patches.keys())
if __name__ == "__main__":
fig = plt.figure(figsize=(10, 10))
ax = fig.gca()
ax.axis('off')
draw_neural_net(ax, .1, .9, .1, .9, [4, 7, 5, 2])
fig.savefig('nn.png')
plt.show()
| bsd-3-clause |
callmewillig/NetViz | Interface/interface.py | 1 | 6173 | """"
interface.py
This program will create a user interface with the capablilities to choose an
address and gain various information based on that address.
libraries(sudo apt-get install...)
python3-pyqt5(PyQt5)
matplotlib
Authors
Tommy Slota
Mathew Willig
Nicholas Miller
"""
#-----imports-----
import sys
from PyQt5.QtWidgets import *
from PyQt5 import *
import optparse
import os
from threading import Thread
#-----import graph/table programs----
from tablesANDgraphs import *
adr = []
"""
-----IPWindow----
=======================================================================
- Makes a second window with IP as title
=======================================================================
"""
class IPWindow(QWidget):
    """Secondary window offering graph/table actions for one IP address."""

    def __init__(self, address):
        super().__init__()
        self.IPWIN(address)

    def IPWIN(self, address):
        """Build the fixed-size window for *address* and wire its buttons.

        address: the IP-address string shown as the window title.
        """
        # -----Set up window-----
        self.setFixedSize(360,110)
        self.setGeometry(265, 1, 0, 0)
        self.setWindowTitle(address)
        # NOTE(review): the chosen address is stashed in a module-level global
        # so action() can hand it to the plotting helpers; because of this,
        # only the most recently opened IP window behaves correctly when
        # several are open at once.
        global ip
        ip = address
        # -----Labeling-----
        graphs = QLabel("Graphs",self)
        graphs.move(55,10)
        tables = QLabel("Tables",self)
        tables.move(205,10)
        # -----Create Buttons (all routed through self.action)-----
        tuB = QPushButton("TCP vs UDP", self)
        tuB.setFixedSize(140,30)
        tuB.move(10,30)
        tuB.clicked[bool].connect(self.action)
        DestB = QPushButton("Destinations", self)
        DestB.setFixedSize(140,30)
        DestB.move(160,30)
        DestB.clicked[bool].connect(self.action)
        ProtoB = QPushButton("Top 10 Ports Used",self)
        ProtoB.setFixedSize(140,30)
        ProtoB.move(10,70)
        ProtoB.clicked[bool].connect(self.action)
        self.show()

    def action(self):
        """Dispatch on the clicked button's label to open the matching view."""
        global ip
        source = self.sender()
        if source.text() == "TCP vs UDP":
            tcpVudp(ip)
        elif source.text() == "Top 10 Ports Used":
            barGraph(ip)
        elif source.text() == "Destinations":
            setip(ip)
            self.destT = DestTable()
            self.destT.show()
        else:
            # Unknown sender: print a blank line (original placeholder).
            print(" ")
"""
-----MainWindow-----
===================================================================================
- Makes a small taskbar window
- Adds IP addresses from parser as clickable buttons
- Will open an "IP Window" if button is clicked(One at a time, but will be changed)
===================================================================================
"""
class MainWindow(QWidget):
    """Main taskbar window listing every parsed IP address as a button."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Create the scrollable list of IP-address buttons."""
        # -----Initialize the taskbar layout-----
        box = QVBoxLayout(self)
        self.setLayout(box)
        scroll = QScrollArea(self)
        box.addWidget(scroll)
        scrollContent = QWidget(scroll)
        scrollLayout = QVBoxLayout(scrollContent)
        scrollContent.setLayout(scrollLayout)
        # -----Fill the module-level "adr" list of addresses-----
        # NOTE(review): mutates sys.path and imports the parser lazily, so the
        # capture data is only parsed when the window is built.
        sys.path.insert(0, '../Parser/')
        from getIPs import getIPsStart
        global adr
        adr = getIPsStart()
        self.setFixedSize(200, 300)
        # -----One button per address, added to the scrollable content-----
        buttons = {}
        size = 40  # NOTE(review): appears unused -- confirm before removing
        for count in range(0,len(adr)):
            buttons[count] = QPushButton(adr[count], self)
            buttons[count].setFixedSize(140,30)
            buttons[count].clicked[bool].connect(self.action)
            scrollLayout.addWidget(buttons[count])
        scroll.setWidget(scrollContent)
        # -----Set window position and title-----
        self.setGeometry(1, 1,0,0)
        self.setWindowTitle('IP Addresses')
        self.show()

    def action(self):
        """Open an IPWindow for whichever address button was clicked."""
        source = self.sender()
        global adr
        for i in range(0,len(adr)):
            if source.text() == adr[i]:
                self.IPwin = IPWindow(adr[i])
                self.IPwin.show()
"""
-----MAIN----
======================================================================
- MainWindow is executed and is also terminated when closed
======================================================================
"""
if __name__ == "__main__":
parser = optparse.OptionParser(usage='''
-s <start_time> time window start for filtering packets
yyyy/MM/dd.hh:mm:ss
-e <end_time> time window end for filtering packets
yyyy/MM/dd.hh:mm:ss
''')
parser.add_option('-s', '--start_time',
dest='start_time', action='store', type='string')
parser.add_option('-e', '--end_time',
dest='end_time', action='store', type='string')
(options, args) = parser.parse_args()
if not options.start_time:
if options.end_time:
sys.exit("End time given but no start time was given.\n")
else:
os.system("nfdump -R ../nfcapd/ > ../Temp/SipDip.txt")
else:
if not options.end_time:
thisstring = "nfdump -t " + options.start_time + " -R ../nfcapd/ > ../Temp/SipDip.txt"
print(thisstring)
os.system(thisstring)
else:
thisstring = "nfdump -t " + options.start_time + "-" + options.end_time + " -R ../nfcapd/ > ../Temp/SipDip.txt"
print(thisstring)
os.system(thisstring)
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec_())
| mit |
victorbergelin/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    Returns a 1-D array of ``resolution`` samples of the Ricker wavelet of
    the given ``width``, centered at index ``center``.

    BUG FIX: the amplitude factor was written ``np.pi ** 1 / 4``, which by
    operator precedence is pi/4 rather than the fourth root of pi appearing
    in the canonical Ricker normalization 2 / (sqrt(3*a) * pi**(1/4)).
    (Downstream use normalizes each atom, so only the raw scale changes.)
    """
    x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets.

    Returns an (n_components, resolution) array whose rows are unit-norm
    Ricker atoms with centers evenly spread over the signal support.
    """
    centers = np.linspace(0, resolution - 1, n_components)
    D = np.array([ricker_function(resolution, center, width)
                  for center in centers])
    # Normalize every atom to unit l2 norm.
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
    return D
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
# NOTE(review): under Python 3 this is a float (true division); it is passed
# as an array size inside ricker_matrix -- this example predates Python 3.
n_components = resolution / subsampling

# Compute a wavelet dictionary: one fixed-width, one mixing five widths.
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=np.floor(n_components / 5))
                for w in (10, 50, 100, 500, 1000))]

# Generate a piecewise-constant signal: +3 on the first quarter, -1 elsewhere.
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.

# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]

pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    pl.subplot(1, 2, subplot + 1)
    pl.title('Sparse coding against %s dictionary' % title)
    pl.plot(y, ls='dotted', label='Original signal')
    # Do a wavelet approximation with each estimator.
    for title, algo, alpha, n_nonzero in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y)
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
                % (title, density, squared_error))

    # Soft thresholding debiasing: refit the selected atoms by least squares
    # to remove the shrinkage bias introduced by thresholding.
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y)
    _, idx = np.where(x != 0)
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    pl.plot(x,
            label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
            (len(idx), squared_error))
    pl.axis('tight')
    pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
xzh86/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
navoj/ecell4 | ecell4/egfrd/legacy/samples/hardbody/plot.py | 6 | 3594 | #!/usr/bin/env/python
import sys
import numpy
import scipy.io
from matplotlib.pylab import *
N_A = 6.0221367e23
def plot_data(N, T, fmt):
    # Plot mean run time versus N on log-log axes with line style *fmt*.
    # (Python 2 file: uses the print statement below; names like loglog come
    # from the module's star import of matplotlib.pylab.)
    T = numpy.array(T)
    mean = T.mean(1)  # per-row mean over repeated runs
    # NOTE(review): computed but unused -- the errorbar call that consumed it
    # is commented out; also std() here is over all elements, not per row.
    std_err = T.std()/math.sqrt(len(T))
    #errorbar(N, mean, yerr=std_err, fmt=fmt)
    print N, mean
    loglog(N, mean, fmt)
from out_C import *
from out_V import *
from out_N300 import *
from out_N3000 import *
from out_BD import *
#from out_BD2 import *
from run_all import Nv, Nc, V300, N300, V3000, N3000, VBD, NBD, TBD, TBD2
# (40e-18 ** (1/3.0))**2 / 1e-12
# = 11.69607095285148
# Reference N values for the power-law guide lines.
X = numpy.array([30,100,300,1000,3000,10000,30000,100000,1e8])
# Main panel: run time versus particle number.
axes([.12,.14,.86,.83])
#for i in range(len(Nv)):
plot_data(Nv, data_V,'kx')
loglog(X, 0.004* X**(5.0/3), 'k--')
figtext(.25, .18, r'(2) V = 1 pL')
figtext(.82, .85, r'$t \ \propto \ N^{5/3}$', color='k')
#for i in range(len(Nc)):
plot_data(Nc, data_C,'ko')
loglog(X, 4* X, 'k-')
figtext(.14, .4, r'(1) C = 50 nM')
figtext(.8, .59, r'$t \ \propto \ N$', color='k')
# plot BD data
#plot_data(NBD, data_BD2,'k.')
plot_data(NBD, data_BD,'k.')
# loglog(X, 2e6* X, 'b:') # 1e-6 tau
# loglog(X, 2e4* X, 'b:') # 1e-4 tau
loglog(X, 2e5* X, 'b:') # 1e-5 tau
#figtext(.2, .82, r'BD', color='k')
#figtext(.19, .64, r'BD (relaxed)', color='k')
figtext(.19, .64, r'BD', color='k')
#loglog(data1[0] , data1[1], 'o-', label='Vol. = 1e-15 L')
#loglog(data2[0] , data2[1], 'o-', label='# particles = 600')
#loglog(data3[0] , data3[1], 'o-', label='Conc. = 1e-6 M')
xlabel('N [# particles]', size=22)
#xlabel('Concentration [M]')
#ylabel('time [s]', size=22)
#legend()
# xlim(4,9e6)
# ylim(1.1,2e11)
xlim(40,9e7)
ylim(0,2e11)
xticks(size=18)
yticks([60,3600,3600*24,3600*24*30, 3600*24*30*12],
       ['minute', 'hour', 'day', 'month', 'year'], size=16)
#grid()
# Number concentrations for the N=300 and N=3000 series (in molar).
C300 = numpy.array(N300) / (numpy.array(V300)*N_A)
C3000 = numpy.array(N3000) / (numpy.array(V3000)*N_A)
print C300, C3000
# Cx3000=numpy.array([
# # 9.35e-11,
# 9.35e-10,
# 9.35e-9,
# 9.35e-8,#N=3000,V=40e-15
# 9.35e-7,#N=3000,V=40e-16
# 9.35e-6,#N=3000,V=40e-17
# 9.35e-5,#N=3000,V=40e-18
# 9.35e-4,
# 9.35e-4*3
# ])
# Cx300=numpy.array([
# 9.35e-10,#N=300,V=40e-14
# 9.35e-9,#N=300,V=40e-15
# 9.35e-8,#16
# 9.35e-7,#17
# 9.35e-6,#18
# 9.35e-5,#19
# 9.35e-4,#20
# 9.35e-4*3
# # 3.74e-3,#1e-21
# # 9.35e-3,#4e-21
# ])
#data_N3000 *= 11696
#data_N300 *= 11696
# Inset panel: run time versus concentration at fixed N.
axes([.63,.19,.33,.31])
# M-> uM
C300 *= 1e6
C3000 *= 1e6
for i in range(len(C3000)):
    plot_data(C3000, data_N3000,'k+')
#loglog(C3000, 5e1** C3000, 'b:')
bd3000 = numpy.array(data_BD[3]).mean()
loglog([1e-4,1e4],[bd3000,bd3000], 'b:')
for i in range(len(C300)):
    plot_data(C300, data_N300,'kd')
loglog(C300, 1.0e4* C300**(2.0/3.0), 'k-.', label='C^(2/3)')
#loglog(C300, 1e5* C300, 'k-.', label='C^1')
#loglog(C300, 2.5e4* C300**(4.0/3.0), 'k-.', label='C^(4/3)')
figtext(.75, .195, r'(3a) N = 300')
figtext(.84, .25, r'$t \ \propto \ C^{2/3}$', color='k')
figtext(.66, .32, r'(3b) N = 3000')
#bd 300
bd300 = numpy.array(data_BD[1]).mean()
loglog([1e-5,1e5],[bd300,bd300], 'b:')
figtext(.65, .455, r'BD', color='k')
#xlabel('Concentration')
#ylabel('time [s]')
#xlim(5e-10,5e-2)
#ylim(1e2,5e9)
#xlim(5e-10,5e-3)
xlim(2e-4,9e3)
ylim(2e2,8e11)
xticks([1e-3, 1e0, 1e3], ['nM','uM','mM'], size=16)
yticks([60,3600,3600*24,3600*24*30, 3600*24*30*12],
       ['m', 'h', 'd', 'm', 'y'], size=16)
show()
#savefig('fig1.eps')

#>>> _gfrd.S_irr(.0001 * 1e-8**2/1e-12, 1e-8, 10 * 1e-8 * 1e-12, 1e-12, 1e-8)
#0.99116163945434221
| gpl-2.0 |
jrleeman/MetPy | setup.py | 1 | 3270 | # Copyright (c) 2008,2010,2015,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Setup script for installing MetPy."""
from __future__ import print_function
from setuptools import find_packages, setup
import versioneer
# Version string is derived from git tags by versioneer.
ver = versioneer.get_version()

setup(
    name='MetPy',
    version=ver,
    # BUG FIX: the two implicitly-concatenated fragments lacked a separating
    # space, producing "...visualizing andperforming calculations...".
    description='Collection of tools for reading, visualizing and '
                'performing calculations with weather data.',
    long_description='The space MetPy aims for is GEMPAK '
                     '(and maybe NCL)-like functionality, in a way that '
                     'plugs easily into the existing scientific Python '
                     'ecosystem (numpy, scipy, matplotlib).',
    url='http://github.com/Unidata/MetPy',
    author='Ryan May, Patrick Marsh, Sean Arms, Eric Bruning',
    author_email='python-users@unidata.ucar.edu',
    maintainer='MetPy Developers',
    maintainer_email='python-users@unidata.ucar.edu',
    license='BSD',
    classifiers=['Development Status :: 4 - Beta',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 3.6',
                 'Programming Language :: Python :: 3.7',
                 'Topic :: Scientific/Engineering',
                 'Topic :: Scientific/Engineering :: Atmospheric Science',
                 'Intended Audience :: Science/Research',
                 'Operating System :: OS Independent',
                 'License :: OSI Approved :: BSD License'],
    keywords='meteorology weather',
    packages=find_packages(exclude=['doc', 'examples']),
    # Non-Python data files shipped inside the package.
    package_data={'metpy.plots': ['colortable_files/*.tbl', 'nexrad_tables/*.tbl',
                                  'fonts/*.ttf', '_static/metpy_75x75.png',
                                  '_static/metpy_150x150.png', '_static/unidata_75x75.png',
                                  '_static/unidata_150x150.png'],
                  'metpy': ['static-data-manifest.txt']},
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
    install_requires=['matplotlib>=2.0.0', 'numpy>=1.12.0', 'scipy>=0.17.0',
                      'pint!=0.9', 'xarray>=0.10.7', 'enum34;python_version<"3.4"',
                      'pooch>=0.1, <0.3', 'traitlets>=4.3.0'],
    extras_require={
        'cdm': ['pyproj>=1.9.4,!=2.0.0'],
        'dev': ['ipython[all]>=3.1'],
        'doc': ['sphinx>=1.4', 'sphinx-gallery', 'doc8', 'm2r',
                'netCDF4'],
        'examples': ['cartopy>=0.13.1', 'matplotlib>=2.2.0'],
        'test': ['pytest>=2.4', 'pytest-runner', 'pytest-mpl', 'pytest-flake8',
                 'cartopy>=0.16.0', 'flake8>3.2.0', 'flake8-builtins!=1.4.0',
                 'flake8-comprehensions', 'flake8-copyright',
                 'flake8-docstrings', 'flake8-import-order', 'flake8-mutable',
                 'flake8-pep3101', 'flake8-print', 'flake8-quotes',
                 'pep8-naming', 'netCDF4']
    },
    cmdclass=versioneer.get_cmdclass(),
    zip_safe=True,
    download_url='https://github.com/Unidata/MetPy/archive/v{}.tar.gz'.format(ver), )
| bsd-3-clause |
dnet/proxmark3 | fpga/tests/plot_edgedetect.py | 14 | 1553 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (C) 2014 iZsh <izsh at fail0verflow.com>
#
# This code is licensed to you under the terms of the GNU GPL, version 2 or,
# at your option, any later version. See the LICENSE.txt file for the text of
# the license.
#-----------------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
import sys
if len(sys.argv) != 2:
print "Usage: %s <basename>" % sys.argv[0]
sys.exit(1)
BASENAME = sys.argv[1]
nx = numpy.fromfile(BASENAME + ".time")
def plot_time(dat1):
plt.plot(nx, dat1)
sig = open(BASENAME + ".filtered").read()
sig = map(lambda x: ord(x), sig)
min_vals = open(BASENAME + ".min").read()
min_vals = map(lambda x: ord(x), min_vals)
max_vals = open(BASENAME + ".max").read()
max_vals = map(lambda x: ord(x), max_vals)
states = open(BASENAME + ".state").read()
states = map(lambda x: ord(x) * 10 + 65, states)
toggles = open(BASENAME+ ".toggle").read()
toggles = map(lambda x: ord(x) * 10 + 80, toggles)
high = open(BASENAME + ".high").read()
high = map(lambda x: ord(x), high)
highz = open(BASENAME + ".highz").read()
highz = map(lambda x: ord(x), highz)
lowz = open(BASENAME + ".lowz").read()
lowz = map(lambda x: ord(x), lowz)
low = open(BASENAME + ".low").read()
low = map(lambda x: ord(x), low)
plot_time(sig)
plot_time(min_vals)
plot_time(max_vals)
plot_time(states)
plot_time(toggles)
plot_time(high)
plot_time(highz)
plot_time(lowz)
plot_time(low)
plt.show()
| gpl-2.0 |
PhilReinhold/pyqt_utils | plot_widgets.py | 1 | 16851 | from PyQt4 import QtGui, QtCore
import warnings
import numpy as np
import pyqtgraph as pg
# pyqtgraph config: disable the scipy.weave acceleration path.
pg.setConfigOption("useWeave", False)
from pyqtgraph.dockarea import Dock, DockArea
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg, NavigationToolbar2QTAgg
class CrosshairPlotWidget(pg.PlotWidget):
    """pyqtgraph plot widget with a toggleable, snap-to-data crosshair.

    Double-click toggles the crosshair on/off; while it is on, a single
    click freezes/unfreezes cursor tracking.  ``crosshair_moved(x, y)`` is
    emitted every time the crosshair snaps to a data point.
    """

    crosshair_moved = QtCore.pyqtSignal(float, float)

    def __init__(self, parametric=False, *args, **kwargs):
        # parametric=True snaps to the nearest point in the plane; otherwise
        # the snap is done along x only (assumes xData sorted ascending).
        super(CrosshairPlotWidget, self).__init__(*args, **kwargs)
        self.scene().sigMouseClicked.connect(self.toggle_search)
        self.scene().sigMouseMoved.connect(self.handle_mouse_move)
        self.cross_section_enabled = False
        self.parametric = parametric
        self.search_mode = True
        self.label = None
        self.selected_point = None

    def set_data(self, data):
        """Replace the plot contents with *data* (ignored if empty or None)."""
        if data is not None and len(data) > 0:
            self.clear()
            self.plot(data)

    def toggle_search(self, mouse_event):
        """Double-click toggles the crosshair; single click toggles tracking."""
        if mouse_event.double():
            if self.cross_section_enabled:
                self.hide_cross_hair()
            else:
                self.add_cross_hair()
        elif self.cross_section_enabled:
            self.search_mode = not self.search_mode
            if self.search_mode:
                self.handle_mouse_move(mouse_event.scenePos())

    def handle_mouse_move(self, mouse_event):
        """Snap the crosshair to the data point nearest the cursor."""
        if self.cross_section_enabled and self.search_mode:
            item = self.getPlotItem()
            vb = item.getViewBox()
            view_coords = vb.mapSceneToView(mouse_event)
            view_x, view_y = view_coords.x(), view_coords.y()
            best_guesses = []
            for data_item in item.items:
                if isinstance(data_item, pg.PlotDataItem):
                    xdata, ydata = data_item.xData, data_item.yData
                    # Squared distance of point i from the cursor.
                    index_distance = lambda i: (xdata[i]-view_x)**2 + (ydata[i] - view_y)**2
                    if self.parametric:
                        index = min(range(len(xdata)), key=index_distance)
                    else:
                        # Nearest x neighbour; pick the closer of the two
                        # bracketing samples.
                        index = min(np.searchsorted(xdata, view_x), len(xdata)-1)
                        if index and xdata[index] - view_x > view_x - xdata[index - 1]:
                            index -= 1
                    pt_x, pt_y = xdata[index], ydata[index]
                    best_guesses.append(((pt_x, pt_y), index_distance(index)))
            if not best_guesses:
                return
            # Among all curves, keep the candidate closest to the cursor.
            (pt_x, pt_y), _ = min(best_guesses, key=lambda x: x[1])
            self.selected_point = (pt_x, pt_y)
            self.v_line.setPos(pt_x)
            self.h_line.setPos(pt_y)
            self.label.setText("x=%.2e, y=%.2e" % (pt_x, pt_y))
            self.crosshair_moved.emit(pt_x, pt_y)

    def add_cross_hair(self):
        """Create the crosshair lines (and the coordinate label, only once)."""
        self.h_line = pg.InfiniteLine(angle=0, movable=False)
        self.v_line = pg.InfiniteLine(angle=90, movable=False)
        self.addItem(self.h_line, ignoreBounds=False)
        self.addItem(self.v_line, ignoreBounds=False)
        if self.label is None:
            self.label = pg.LabelItem(justify="right")
            self.getPlotItem().layout.addItem(self.label, 4, 1)
        self.x_cross_index = 0
        self.y_cross_index = 0
        self.cross_section_enabled = True

    def hide_cross_hair(self):
        """Remove the crosshair lines and disable tracking."""
        self.removeItem(self.h_line)
        self.removeItem(self.v_line)
        self.cross_section_enabled = False
class CrossSectionImageView(pg.ImageView):
    """Image view with live horizontal/vertical cross-section line plots.

    A click toggles "search" mode; while searching, mouse moves drive the
    crosshair lines and update the two companion CrosshairPlotWidget traces
    (one per image axis).  The traces' own crosshairs feed positions back
    via set_position().
    """

    def __init__(self, trace_size=80, **kwargs):
        # Use a PlotItem as the view so the image gets labelled axes.
        kwargs['view'] = pg.PlotItem(labels=kwargs.pop('labels', None))
        super(CrossSectionImageView, self).__init__(**kwargs)
        self.view.setAspectLocked(lock=False)
        self.search_mode = False
        self.signals_connected = False
        self.set_histogram(False)
        # Context-menu toggle for the histogram sidebar.
        histogram_action = QtGui.QAction('Histogram', self)
        histogram_action.setCheckable(True)
        histogram_action.triggered.connect(self.set_histogram)
        self.scene.contextMenu.append(histogram_action)
        self.ui.histogram.gradient.loadPreset('thermal')
        try:
            self.connect_signal()
        except RuntimeError:
            warnings.warn('Scene not set up, cross section signals not connected')
        self.y_cross_index = 0
        self.x_cross_index = 0
        # Horizontal cross-section trace (a row of the image) + marker line.
        self.h_cross_section_widget = CrosshairPlotWidget()
        self.h_cross_section_widget.add_cross_hair()
        self.h_cross_section_widget.search_mode = False
        self.h_cross_section_widget_data = self.h_cross_section_widget.plot([0,0])
        self.h_line = pg.InfiniteLine(pos=0, angle=0, movable=False)
        self.view.addItem(self.h_line, ignoreBounds=False)
        # Vertical cross-section trace (a column of the image) + marker line.
        self.v_cross_section_widget = CrosshairPlotWidget()
        self.v_cross_section_widget.add_cross_hair()
        self.v_cross_section_widget.search_mode = False
        self.v_cross_section_widget_data = self.v_cross_section_widget.plot([0,0])
        self.v_line = pg.InfiniteLine(pos=0, angle=90, movable=False)
        self.view.addItem(self.v_line, ignoreBounds=False)
        # Moving a crosshair in either trace moves this view's position.
        self.h_cross_section_widget.crosshair_moved.connect(lambda x, _: self.set_position(x=x))
        self.v_cross_section_widget.crosshair_moved.connect(lambda y, _: self.set_position(y=y))

    def set_data(self, data):
        """Display *data* (thin alias for setImage)."""
        self.setImage(data)

    def setLabels(self, xlabel="X", ylabel="Y", zlabel="Z"):
        """Label the image axes and both cross-section plots."""
        # NOTE(review): self.plot_item is never defined on this class (the
        # PlotItem is stored as self.view); this line likely raises
        # AttributeError — verify before relying on setLabels().
        self.plot_item.setLabels(bottom=(xlabel,), left=(ylabel,))
        self.h_cross_section_widget.plotItem.setLabels(bottom=xlabel, left=zlabel)
        self.v_cross_section_widget.plotItem.setLabels(bottom=ylabel, left=zlabel)
        self.ui.histogram.item.axis.setLabel(text=zlabel)

    def setImage(self, *args, **kwargs):
        """Show a new image, remembering pos/scale for axis calibration."""
        if 'pos' in kwargs:
            self._x0, self._y0 = kwargs['pos']
        else:
            self._x0, self._y0 = 0, 0
        if 'scale' in kwargs:
            self._xscale, self._yscale = kwargs['scale']
        else:
            self._xscale, self._yscale = 1, 1
        # Re-center the crosshair lines on the current (or default) view.
        if self.imageItem.image is not None:
            (min_x, max_x), (min_y, max_y) = self.imageItem.getViewBox().viewRange()
            mid_x, mid_y = (max_x + min_x)/2., (max_y + min_y)/2.
        else:
            mid_x, mid_y = 0, 0
        self.h_line.setPos(mid_y)
        self.v_line.setPos(mid_x)
        super(CrossSectionImageView, self).setImage(*args, **kwargs)
        self.update_cross_section()

    def set_histogram(self, visible):
        """Show/hide the histogram sidebar and its ROI/norm buttons."""
        self.ui.histogram.setVisible(visible)
        self.ui.roiBtn.setVisible(visible)
        self.ui.normBtn.setVisible(visible)

    def connect_signal(self):
        """This can only be run after the item has been embedded in a scene"""
        if self.signals_connected:
            warnings.warn("")
        if self.imageItem.scene() is None:
            raise RuntimeError('Signal can only be connected after it has been embedded in a scene.')
        self.imageItem.scene().sigMouseClicked.connect(self.toggle_search)
        self.imageItem.scene().sigMouseMoved.connect(self.handle_mouse_move)
        # Re-slice the cross sections when the time slider moves.
        self.timeLine.sigPositionChanged.connect(self.update_cross_section)
        self.signals_connected = True

    def toggle_search(self, mouse_event):
        # Single click toggles live tracking; double-click is handled by the
        # enclosing dock (see CrossSectionDock.handle_mouse_click).
        if mouse_event.double():
            return
        self.search_mode = not self.search_mode
        if self.search_mode:
            self.handle_mouse_move(mouse_event.scenePos())

    def handle_mouse_move(self, mouse_event):
        # Translate the scene position into view coordinates and follow it.
        if self.search_mode:
            view_coords = self.imageItem.getViewBox().mapSceneToView(mouse_event)
            view_x, view_y = view_coords.x(), view_coords.y()
            self.set_position(view_x, view_y)

    def set_position(self, x=None, y=None):
        """Move the crosshair to view coords (x, y); omitted axes stay put."""
        if x is None:
            x = self.v_line.getXPos()
        if y is None:
            y = self.h_line.getYPos()
        item_coords = self.imageItem.getViewBox().mapFromViewToItem(self.imageItem, QtCore.QPointF(x, y))
        #item_coords = self.imageItem.mapFromScene(mouse_event)
        item_x, item_y = item_coords.x(), item_coords.y()
        max_x, max_y = self.imageItem.image.shape
        # Ignore positions outside the image.
        if item_x < 0 or item_x > max_x or item_y < 0 or item_y > max_y:
            return
        self.v_line.setPos(x)
        self.h_line.setPos(y)
        #(min_view_x, max_view_x), (min_view_y, max_view_y) = self.imageItem.getViewBox().viewRange()
        # Clamp the integer pixel indices into the image bounds.
        self.x_cross_index = max(min(int(item_x), max_x-1), 0)
        self.y_cross_index = max(min(int(item_y), max_y-1), 0)
        z_val = self.imageItem.image[self.x_cross_index, self.y_cross_index]  # NOTE(review): unused (label code below is commented out)
        self.update_cross_section()
        #self.text_item.setText("x=%.2e, y=%.2e, z=%.2e" % (view_x, view_y, z_val))

    def update_cross_section(self):
        """Refresh both cross-section traces and their marker lines."""
        nx, ny = self.imageItem.image.shape
        x0, y0, xscale, yscale = self._x0, self._y0, self._xscale, self._yscale
        # Calibrated axes derived from the stored pos/scale.
        xdata = np.linspace(x0, x0+(xscale*(nx-1)), nx)
        ydata = np.linspace(y0, y0+(yscale*(ny-1)), ny)
        zval = self.imageItem.image[self.x_cross_index, self.y_cross_index]
        self.h_cross_section_widget_data.setData(xdata, self.imageItem.image[:, self.y_cross_index])
        self.h_cross_section_widget.v_line.setPos(xdata[self.x_cross_index])
        self.h_cross_section_widget.h_line.setPos(zval)
        self.v_cross_section_widget_data.setData(ydata, self.imageItem.image[self.x_cross_index, :])
        self.v_cross_section_widget.v_line.setPos(ydata[self.y_cross_index])
        self.v_cross_section_widget.h_line.setPos(zval)
class MoviePlotWidget(CrossSectionImageView):
    """CrossSectionImageView with play/stop buttons that animate the stack.

    The buttons are created but not laid out here; callers add them to a
    layout themselves (see the commented-out demo in __main__).
    """

    def __init__(self, *args, **kwargs):
        super(MoviePlotWidget, self).__init__(*args, **kwargs)
        self.play_button = QtGui.QPushButton("Play")
        self.stop_button = QtGui.QPushButton("Stop")
        self.stop_button.hide()
        # Advance one frame every 50 ms while playing.
        self.play_timer = QtCore.QTimer()
        self.play_timer.setInterval(50)
        self.play_timer.timeout.connect(self.increment)
        # Play/stop buttons swap visibility as they start/stop the timer.
        self.play_button.clicked.connect(self.play_timer.start)
        self.play_button.clicked.connect(self.play_button.hide)
        self.play_button.clicked.connect(self.stop_button.show)
        self.stop_button.clicked.connect(self.play_timer.stop)
        self.stop_button.clicked.connect(self.play_button.show)
        self.stop_button.clicked.connect(self.stop_button.hide)

    def setImage(self, array, *args, **kwargs):
        """Show *array* and remember its first-axis (frame) count."""
        super(MoviePlotWidget, self).setImage(array, *args, **kwargs)
        self.tpts = len(array)

    def increment(self):
        """Step to the next frame, wrapping around at the end."""
        self.setCurrentIndex((self.currentIndex + 1) % self.tpts)
class CloseableDock(Dock):
    """pyqtgraph Dock with a small close button in its top-left corner."""

    def __init__(self, name, *args, **kwargs):
        super(CloseableDock, self).__init__(name, *args, **kwargs)
        style = QtGui.QStyleFactory().create("windows")
        icon = style.standardIcon(QtGui.QStyle.SP_TitleBarCloseButton)
        button = QtGui.QPushButton(icon, "", self)
        button.clicked.connect(self.close)
        button.setGeometry(0, 0, 20, 20)
        button.raise_()
        # Expose the click signal so users can react to the dock closing.
        self.closeClicked = button.clicked

    def close(self):
        """Detach the dock and collapse its (now possibly empty) container."""
        self.setParent(None)
        self.closed = True
        # Let the container remove itself unless it is the top-level one.
        if self._container is not self.area.topContainer:
            self._container.apoptose()
class CrossSectionDock(CloseableDock):
    """Dock wrapping a CrossSectionImageView plus two cross-section docks.

    Double-clicking the image toggles the crosshair and opens/closes the
    'x trace' and 'y trace' docks next to this one.
    """

    def __init__(self, name, **kwargs):
        widget = self.widget = kwargs['widget'] = CrossSectionImageView()
        super(CrossSectionDock, self).__init__(name, **kwargs)
        self.cross_section_enabled = False
        self.closeClicked.connect(self.hide_cross_section)
        self.h_cross_dock = CloseableDock(name='x trace', widget=widget.h_cross_section_widget, area=self.area)
        self.v_cross_dock = CloseableDock(name='y trace', widget=widget.v_cross_section_widget, area=self.area)
        widget.imageItem.scene().sigMouseClicked.connect(self.handle_mouse_click)
        # Start with the crosshair hidden; the widget added the lines itself.
        widget.removeItem(widget.h_line)
        widget.removeItem(widget.v_line)
        widget.search_mode = False
        self.cross_section_enabled = False

    def set_data(self, array):
        """Display *array* in the wrapped image view."""
        self.widget.setImage(array)

    def toggle_cross_section(self):
        if self.cross_section_enabled:
            self.hide_cross_section()
        else:
            self.add_cross_section()

    def hide_cross_section(self):
        """Remove the crosshair lines and close both trace docks."""
        if self.cross_section_enabled:
            self.widget.removeItem(self.widget.h_line)
            self.widget.removeItem(self.widget.v_line)
            #self.ui.graphicsView.removeItem(self.text_item)
            self.cross_section_enabled = False
            self.h_cross_dock.close()
            self.v_cross_dock.close()

    def add_cross_section(self):
        """Show the crosshair lines and open both trace docks."""
        image_item = self.widget.imageItem
        # NOTE(review): mid_x/mid_y are computed but never used here.
        if image_item.image is not None:
            (min_x, max_x), (min_y, max_y) = image_item.getViewBox().viewRange()
            mid_x, mid_y = (max_x + min_x)/2., (max_y + min_y)/2.
        else:
            mid_x, mid_y = 0, 0
        self.widget.addItem(self.widget.h_line, ignoreBounds=False)
        self.widget.addItem(self.widget.v_line, ignoreBounds=False)
        self.x_cross_index = 0
        self.y_cross_index = 0
        self.cross_section_enabled = True
        #self.text_item = pg.LabelItem(justify="right")
        #self.img_view.ui.gridLayout.addWidget(self.text_item, 2, 1, 1, 2)
        #self.img_view.ui.graphicsView.addItem(self.text_item)#, 2, 1)
        #self.widget.layout().addItem(self.text_item, 4, 1)
        #self.cs_layout.addItem(self.label, 2, 1) #TODO: Find a way of displaying this label
        # NOTE(review): this sets search_mode on the dock, not on
        # self.widget.search_mode which the image view reads — verify intent.
        self.search_mode = True
        self.area.addDock(self.h_cross_dock)
        self.area.addDock(self.v_cross_dock, position='right', relativeTo=self.h_cross_dock)
        self.cross_section_enabled = True

    def handle_mouse_click(self, mouse_event):
        # Double-click anywhere on the image toggles the cross sections.
        if mouse_event.double():
            self.toggle_cross_section()
class BackendSwitchableDock(CloseableDock):
    """CloseableDock with a second button that flips its widget's backend."""

    def __init__(self, *args, **kwargs):
        super(BackendSwitchableDock, self).__init__(*args, **kwargs)
        style = QtGui.QStyleFactory().create("windows")
        icon = style.standardIcon(QtGui.QStyle.SP_BrowserReload)
        switch_button = QtGui.QPushButton(icon, "", self)
        # Assumes the first docked widget implements toggle_backend().
        switch_button.clicked.connect(lambda: self.widgets[0].toggle_backend())
        switch_button.setGeometry(20, 0, 20, 20)
        switch_button.raise_()
class MPLPlotWidget(QtGui.QWidget):
    """Minimal matplotlib line-plot widget with a navigation toolbar."""

    def __init__(self):
        super(MPLPlotWidget, self).__init__()
        layout = QtGui.QVBoxLayout(self)
        fig = Figure()
        self.axes = fig.add_subplot(111)
        # NOTE(review): Axes.hold() and NavigationToolbar2QTAgg were removed
        # in modern matplotlib; this code targets the legacy qt4agg backend.
        self.axes.hold(False)
        self.canvas = FigureCanvasQTAgg(fig)
        self.navbar = NavigationToolbar2QTAgg(self.canvas, self)
        layout.addWidget(self.canvas)
        layout.addWidget(self.navbar)
        #self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)

    def set_data(self, data):
        """Plot *data* as a line plot (hold is off, so it replaces the old one)."""
        self.axes.plot(data)
class MPLImageView(MPLPlotWidget):
    """Matplotlib-backed image view reusing MPLPlotWidget's canvas/toolbar."""

    def set_data(self, data):
        self.axes.imshow(data, interpolation='nearest', aspect='auto')
class BackendSwitchablePlot(QtGui.QWidget):
    """Plot container that can swap between matplotlib and pyqtgraph widgets.

    Subclasses override MPLWidget/PGWidget; toggle_backend() swaps the live
    widget and replays the last data set through set_data().
    """

    MPLWidget = MPLPlotWidget
    PGWidget = CrosshairPlotWidget

    def __init__(self):
        super(BackendSwitchablePlot, self).__init__()
        layout = QtGui.QVBoxLayout(self)
        self.widget = self.MPLWidget()
        layout.addWidget(self.widget)
        self.is_mpl = True
        self._data = None

    def set_data(self, data):
        # Remember the data so it can be replayed after a backend switch.
        self._data = data
        self.widget.set_data(data)

    def toggle_backend(self):
        """Swap MPL <-> pyqtgraph widget, re-plotting the cached data."""
        self.widget.setParent(None)
        if self.is_mpl:
            self.widget = self.PGWidget()
        else:
            self.widget = self.MPLWidget()
        self.is_mpl = not self.is_mpl
        if self._data is not None:
            self.widget.set_data(self._data)
        self.layout().addWidget(self.widget)
class BackendSwitchableImageView(BackendSwitchablePlot):
    """Backend-switchable 2-D image view (MPL imshow vs pyqtgraph)."""

    MPLWidget = MPLImageView
    PGWidget = CrossSectionImageView
if __name__ == '__main__':
    # Manual demo: one crosshair line plot and one cross-section image dock
    # inside a DockArea.
    import sys
    import numpy as np
    app = QtGui.QApplication([])
    w = DockArea()
    w1 = CrosshairPlotWidget()
    w1.set_data(np.sin(np.linspace(0, 10, 100)))
    d = CloseableDock("Crosshair Plot", widget=w1)
    w.addDock(d)
    d2 = CrossSectionDock("Cross Section Dock")
    # Radially symmetric test image.
    xs, ys = np.mgrid[-500:500, -500:500]/100.
    rs = np.sqrt(xs**2 + ys**2)
    d2.set_data(rs)
    w.addDock(d2)
    #w2 = CrossSectionImageView()
    #w2.set_data(rs)
    #ts, xs, ys = np.mgrid[0:100, -50:50, -50:50]/20.
    #zs = np.sinc(xs**2 + ys**2 + ts)
    #w2 = MoviePlotWidget()
    #w2.set_data(zs)
    #l.addWidget(w2)
    #l.addWidget(w2.play_button)
    #l.addWidget(w2.stop_button)
    #l.addWidget(w2.h_cross_section_widget)
    #l.addWidget(w2.v_cross_section_widget)
    w.show()
    sys.exit(app.exec_())
| mit |
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles

# Construct dataset: two overlapping Gaussian-quantile blobs; the second
# blob's labels are flipped (1 - y2) so the classes interleave.
X1, y1 = make_gaussian_quantiles(cov=2.,
                                 n_samples=200, n_features=2,
                                 n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
                                 n_samples=300, n_features=2,
                                 n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))

# Create and fit an AdaBoosted decision tree (depth-1 stumps, SAMME).
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
                         algorithm="SAMME",
                         n_estimators=200)

bdt.fit(X, y)

plot_colors = "br"
plot_step = 0.02
class_names = "AB"

plt.figure(figsize=(10, 5))

# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))

# Color the plane by the predicted class on a dense grid.
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")

# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1],
                c=c, cmap=plt.cm.Paired,
                s=20, edgecolor='k',
                label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')

# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
    plt.hist(twoclass_output[y == i],
             bins=10,
             range=plot_range,
             facecolor=c,
             label='Class %s' % n,
             alpha=.5,
             edgecolor='k')
x1, x2, y1, y2 = plt.axis()
# Leave 20% headroom above the tallest histogram for the legend.
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')

plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
canavandl/bokeh | bokeh/protocol.py | 3 | 4691 | from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
import numpy as np
# Optional dependencies: record availability flags instead of hard-failing.
try:
    import pandas as pd
    is_pandas = True
except ImportError:
    is_pandas = False

try:
    from dateutil.relativedelta import relativedelta
    is_dateutil = True
except ImportError:
    is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)

# Nanoseconds per millisecond: used to convert ns-resolution timestamps
# (numpy datetime64[ns], pandas Timestamp.value) to JavaScript-style ms.
millifactor = 10**6.0
class BokehJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy/pandas/datetime/Decimal values.

    All date/time values are encoded as milliseconds since the epoch,
    matching what the BokehJS client expects.
    """

    def transform_series(self, obj):
        """transform series
        """
        # A pandas Series is serialized via its underlying ndarray.
        vals = obj.values
        return self.transform_array(vals)

    # Check for astype failures (putative Numpy < 1.7)
    dt2001 = np.datetime64('2001')
    legacy_datetime64 = (dt2001.astype('int64') ==
                         dt2001.astype('datetime64[ms]').astype('int64'))

    def transform_array(self, obj):
        """Transform arrays into lists of json safe types
        also handles pandas series, and replacing
        nans and infs with strings
        """
        ## not quite correct, truncates to ms..
        if obj.dtype.kind == 'M':
            # datetime64 arrays -> milliseconds since the epoch.
            if self.legacy_datetime64:
                if obj.dtype == np.dtype('datetime64[ns]'):
                    return (obj.astype('int64') / millifactor).tolist()
                # else punt.
            else:
                return obj.astype('datetime64[ms]').astype('int64').tolist()
        elif obj.dtype.kind in ('u', 'i', 'f'):
            return self.transform_numerical_array(obj)
        return obj.tolist()

    def transform_numerical_array(self, obj):
        """handles nans/inf conversion
        """
        if isinstance(obj, np.ma.MaskedArray):
            obj = obj.filled(np.nan)  # Set masked values to nan
        if not np.isnan(obj).any() and not np.isinf(obj).any():
            return obj.tolist()
        else:
            # JSON has no NaN/Inf literals, so encode them as strings.
            transformed = obj.astype('object')
            transformed[np.isnan(obj)] = 'NaN'
            transformed[np.isposinf(obj)] = 'Infinity'
            transformed[np.isneginf(obj)] = '-Infinity'
            return transformed.tolist()

    def transform_python_types(self, obj):
        """handle special scalars, default to default json encoder
        """
        # Pandas Timestamp
        # NOTE(review): pd.tslib was removed in modern pandas; this matches
        # the pandas versions contemporary with this code.
        if is_pandas and isinstance(obj, pd.tslib.Timestamp):
            return obj.value / millifactor  #nanosecond to millisecond
        # NOTE(review): the np.float/np.int aliases were removed in
        # NumPy >= 1.24; fine for the NumPy versions this targets.
        elif np.issubdtype(type(obj), np.float):
            return float(obj)
        elif np.issubdtype(type(obj), np.int):
            return int(obj)
        elif np.issubdtype(type(obj), np.bool_):
            return bool(obj)
        # Datetime, Date
        # NOTE(review): timegm(timetuple()) drops sub-second precision.
        elif isinstance(obj, (dt.datetime, dt.date)):
            return calendar.timegm(obj.timetuple()) * 1000.
        # Numpy datetime64
        elif isinstance(obj, np.datetime64):
            epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
            return (epoch_delta / np.timedelta64(1, 'ms'))
        # Time
        elif isinstance(obj, dt.time):
            # Milliseconds since midnight.
            return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond / 1000.
        elif is_dateutil and isinstance(obj, relativedelta):
            return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
                        minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
        # Decimal
        elif isinstance(obj, decimal.Decimal):
            return float(obj)
        else:
            return super(BokehJSONEncoder, self).default(obj)

    def default(self, obj):
        """Dispatch on type: arrays, Bokeh objects, colors, then scalars."""
        #argh! local import!
        from .plot_object import PlotObject
        from .properties import HasProps
        from .colors import Color
        ## array types
        if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
            return self.transform_series(obj)
        elif isinstance(obj, np.ndarray):
            return self.transform_array(obj)
        elif isinstance(obj, PlotObject):
            # Plot objects are serialized by reference.
            return obj.ref
        elif isinstance(obj, HasProps):
            return obj.changed_properties_with_values()
        elif isinstance(obj, Color):
            return obj.to_css()
        else:
            return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
    """Dump *obj* to a JSON string using the Bokeh-aware *encoder*.

    Honors the global ``pretty`` setting by forcing a 4-space indent.
    Extra keyword arguments are forwarded to ``json.dumps``.
    """
    options = dict(kwargs)
    if settings.pretty(False):
        options["indent"] = 4
    return json.dumps(obj, cls=encoder, **options)
# Deserialization needs no custom decoding; plain json.loads suffices.
deserialize_json = json.loads
# The "web" (de)serializers are currently plain JSON aliases.
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
    """Build the standard status message payload for *status*."""
    return dict(msgtype='status', status=status)
def error_obj(error_msg):
    """Build the standard error message payload carrying *error_msg*."""
    return dict(msgtype='error', error_msg=error_msg)
| bsd-3-clause |
petroolg/robo-spline | graph.py | 1 | 6668 | # Coordinated Spline Motion and Robot Control Project
#
# Copyright (c) 2017 Olga Petrova <olga.petrova@cvut.cz>
# Advisor: Pavel Pisa <pisa@cmp.felk.cvut.cz>
# FEE CTU Prague, Czech Republic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# In 2017, project funded by PiKRON s.r.o. http://www.pikron.com/
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from matplotlib.widgets import Slider, Button, RadioButtons
from interpolation import *
''' Module provides graphing functions for spline trajectory visualisation '''
class Graph:
    """
    Class for graphing interface for spline trajectory. User can adjust interpolation algorithm and parameters.
    """

    def __init__(self, path):
        # path: matrix of joint coordinates, one row per via point.
        self.sol = path
        self.spline = 'poly'
        self.init_offset = self.sol[0]
        self.order = 2
        self.n_polynom, self.n_joints = self.sol.shape
        self.n_seg = int(len(self.sol)/3)
        self.lambda_ = 0.1

    def update(self, ax, fig, init=False):
        """
        Helper function for graph update after parameter change.
        :param ax: Axis to update.
        :param fig: Figure to update.
        :param init: Whether to initialize plots or use existing.
        """
        res = 100  # discretisation of x axis
        if self.spline == 'poly':
            params = poly.interpolate(self.sol)
            # Plain polynomial interpolation is always cubic.
            self.order = 3
            ax.set_title('3rd order polynomial')
        if self.spline == 'b-spline':
            params = b_spline.interpolate(self.sol, order=self.order)
            ax.set_title('%s order B-spline' % (str(self.order) + ('nd' if self.order == 2 else 'rd')))
        if self.spline == 'p-spline':
            num_segments = self.n_seg
            poly_deg = self.order
            penalty_order = 2
            lambda_ = self.lambda_
            params = p_spline.interpolate(self.sol, num_segments, poly_deg, penalty_order, lambda_)
            ax.set_title('%s order P-spline' % (str(self.order) + ('nd' if self.order == 2 else 'rd')))
        # Powers of the normalized segment parameter t in [0, 1].
        if self.order == 2:
            t = np.vstack((np.linspace(0, 1, res), np.linspace(0, 1, res) ** 2))
        if self.order == 3:
            t = np.vstack((np.linspace(0, 1, res), np.linspace(0, 1, res) ** 2, np.linspace(0, 1, res) ** 3))
        # shrinking of x axis is compensated
        t_long = np.linspace(0, self.n_polynom - 1, params.shape[0] * res)
        y = np.empty((self.n_joints, 0))
        off = self.init_offset[np.newaxis].T
        # Evaluate every segment, chaining each segment's end value into the
        # next segment's offset so the curve stays continuous.
        for i in range(params.shape[0]):
            yi = params[i].reshape((self.n_joints, self.order)).dot(t) + off
            off = yi[:, -1][np.newaxis].T
            y = np.append(y, yi, axis=1)
        t = np.arange(0, self.sol.shape[0])
        if init:
            # self.plot interleaves: even entries are the via points (black),
            # odd entries the interpolated spline, per joint.
            self.plot = []
            for k in range(self.n_joints):
                self.plot += plt.plot(t, self.sol.T[k],'k')
                self.plot += plt.plot(t_long, y[k])
        else:
            for k in range(self.n_joints):
                self.plot[2*k].set_data(t, self.sol.T[k])
                self.plot[2*k+1].set_data(t_long, y[k])
        fig.canvas.draw_idle()

    def show_gui(self):
        """
        Show GUI for trajectory visualisation and parameter adjustment.
        """
        fig, ax = plt.subplots()
        plt.subplots_adjust(left=0.3, bottom=0.25)
        ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.e'))
        self.update(ax, fig, init=True)
        # plt.axis([0, 1, -10, 10])
        axcolor = 'lightgoldenrodyellow'
        ax_lambda = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)  # type: Axes
        ax_n_seg = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)  # type: Axes
        s_lam = Slider(ax_lambda, 'lambda', 0.001, 1.0, valinit=0.1)
        s_seg = Slider(ax_n_seg, 'number of segments', 1, len(self.sol), valinit=int(len(self.sol)/3))

        def sel_l_seg(val):
            # Both sliders share one callback; re-read both values.
            self.lambda_ = s_lam.val
            self.n_seg = int(s_seg.val)
            self.update(ax, fig)

        s_lam.on_changed(sel_l_seg)
        s_seg.on_changed(sel_l_seg)
        resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
        button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')

        def reset(event):
            # Resetting the sliders also retriggers sel_l_seg via on_changed.
            s_lam.reset()
            s_seg.reset()

        button.on_clicked(reset)
        r_ord = plt.axes([0.025, 0.35, 0.15, 0.15], facecolor=axcolor)
        radio_ord = RadioButtons(r_ord, ('2nd order', '3rd order'), active=0)
        rsp = plt.axes([0.025, 0.55, 0.15, 0.15], facecolor=axcolor)
        radio_spline = RadioButtons(rsp, ('Polynomial', 'B-spline', 'P-spline'), active=0)

        def set_order(label):
            # Label starts with the digit of the order ('2nd ...'/'3rd ...').
            self.order = int(label[0])
            self.update(ax, fig)

        def set_spline(label):
            # Show only the controls relevant to the selected spline type.
            if label == 'Polynomial':
                self.spline = 'poly'
                ax_lambda.set_visible(False)
                ax_n_seg.set_visible(False)
                r_ord.set_visible(False)
            if label == 'B-spline':
                self.spline = 'b-spline'
                ax_lambda.set_visible(False)
                ax_n_seg.set_visible(False)
                r_ord.set_visible(True)
            if label == 'P-spline':
                self.spline = 'p-spline'
                ax_lambda.set_visible(True)
                ax_n_seg.set_visible(True)
                r_ord.set_visible(True)
            self.update(ax, fig)

        radio_ord.on_clicked(set_order)
        radio_spline.on_clicked(set_spline)
        # Non-polynomial controls start hidden (Polynomial is the default).
        ax_lambda.set_visible(False)
        ax_n_seg.set_visible(False)
        r_ord.set_visible(False)
        plt.show()
# function is defining a gaussian distribution parameter
# Draw a Gaussian sample of elastic moduli and compare its histogram with the
# theoretical normal pdf.
# NOTE(review): Python 2 code (print statements); will not run under Python 3.
import numpy as np
import matplotlib.pyplot as plt

###--------------parameter
sampleSize = 1000  # total sample point
mean_E = 50.  # mean value of modulus

#####----------------------gaussian distribution -------------------#######
mu, sigma = 0, 11.
E_temp = mean_E + np.random.normal(mu, sigma, sampleSize)
# delete all the nagative value generated
E = np.array ([num for num in E_temp if num > 0])
print "mean value is %.4f with %.4f percent discrepancy" %(np.mean(E), abs(np.mean(E)-mean_E)/mean_E*100)
print "max = %.4f" %max(E)
print "min = %.4f" %min(E)
# Histogram of the sample, normalised to unit area.
count, bins, ignored = plt.hist (E, 20, normed = True, align = 'mid')
x = np.linspace(min(bins),max(bins),1000)
# Theoretical normal pdf centred on the (truncated) sample mean.
pdf = np.exp( -(x-np.mean(E))**2 /(2 * sigma**2))/np.sqrt(2*np.pi*sigma**2)
plt.plot(x,pdf,linewidth = 2, color = 'r')
plt.axis ([min(E)*.9,max(E)*1.1,0,max(count)*1.2])
plt.show()
#####----------------------gaussian distribution -------------------#######

#####----------------------lognormal distribution -------------------#######
# (commented-out lognormal experiment kept for reference)
# power_constant = np.log(mean_E) # to insure mean_value is constant, (mu+(sigma^2)/2)should be constant
# print power_constant
# mu = power_constant
# count = 0
# for count in range(0,10):
# 	count += 1
# 	mu = mu - 0.2
# 	# mean and standard deviation
# 	sigma = np.sqrt((power_constant-mu)*2.)
# 	s = np.random.lognormal (mu, sigma, sampleSize)
# 	print np.mean(s)
# 	print max(s)
# 	print min(s)
# count, bins, ignored = plt.hist (s, 500, normed = True, align = 'mid')
# x = np.linspace(0, 100000, 1000)
# pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
#        / (x * sigma * np.sqrt(2 * np.pi)))
# plt.plot (x, pdf, linewidth = 2)
# plt.axis ([0,100000,0,0.0004])
# plt.show()
#####----------------------lognormal distribution -------------------#######
mehdidc/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
    """Temporarily silence stdout/stderr by redirecting them to os.devnull.

    Fixes over the previous version: the original streams are restored in a
    ``finally`` clause (so an exception inside the ``with`` body no longer
    leaves the process writing to devnull), and the two devnull file handles
    are closed instead of leaked.
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    devnull_out = open(os.devnull, 'w')
    devnull_err = open(os.devnull, 'w')
    sys.stdout = devnull_out
    sys.stderr = devnull_err
    try:
        yield
    finally:
        # Flush whatever the body wrote, then restore and clean up.
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        devnull_out.close()
        devnull_err.close()
def gen_toy_problem_1d(intercept=True):
    """Toy 1d regression data ``y = 3*x + c + noise`` with injected outliers.

    Returns ``(X, y, w, c)`` where X has shape (n_samples, 1).  With
    ``intercept=True`` the offset is 2.0 over 50 samples; otherwise it is
    0.1 over 100 samples, with one extra outlier.
    """
    rng = np.random.RandomState(0)
    slope = 3.
    if intercept:
        offset, n_samples = 2., 50
        outliers = [(42, -2, 4), (43, -2.5, 8), (33, 2.5, 1), (49, 2.1, 2)]
    else:
        offset, n_samples = 0.1, 100
        outliers = [(42, -2, 4), (43, -2.5, 8), (53, 2.5, 1),
                    (60, 2.1, 2), (72, 1.8, -7)]
    # Linear model y = 3*x + c + N(0, 0.1**2)
    x = rng.normal(size=n_samples)
    y = slope * x + offset + 0.1 * rng.normal(size=n_samples)
    # Overwrite a handful of samples with hand-placed outliers.
    for idx, x_out, y_out in outliers:
        x[idx] = x_out
        y[idx] = y_out
    return x[:, np.newaxis], y, slope, offset
def gen_toy_problem_2d():
    """Toy 2d regression data ``y = X.[5, 10] + 1 + noise`` with 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 100
    coefs = np.array([5., 10.])
    intercept = 1.
    # Linear model y = 5*x_1 + 10*x_2 + 1 + N(0, 0.1**2)
    X = rng.normal(size=(n_samples, 2))
    y = X.dot(coefs) + intercept + 0.1 * rng.normal(size=n_samples)
    # Replace a random tenth of the targets with large-variance noise.
    n_outliers = n_samples // 10
    outlier_idx = rng.randint(0, n_samples, size=n_outliers)
    y[outlier_idx] = 50 * rng.normal(size=n_outliers)
    return X, y, coefs, intercept
def gen_toy_problem_4d():
    """Toy 4d regression data ``y = X.[5, 10, 42, 7] + 1 + noise``, 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 10000
    coefs = np.array([5., 10., 42., 7.])
    intercept = 1.
    # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + 1 + N(0, 0.1**2)
    X = rng.normal(size=(n_samples, 4))
    y = X.dot(coefs) + intercept + 0.1 * rng.normal(size=n_samples)
    # Replace a random tenth of the targets with large-variance noise.
    n_outliers = n_samples // 10
    outlier_idx = rng.randint(0, n_samples, size=n_outliers)
    y[outlier_idx] = 50 * rng.normal(size=n_outliers)
    return X, y, coefs, intercept
def test_modweiszfeld_step_1d():
    """One modified Weiszfeld step in 1d: fixed point at the median,
    contraction toward it from other start values."""
    X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check startvalue is element of X and solution
    median = 2.
    new_y = _modified_weiszfeld_step(X, median)
    assert_array_almost_equal(new_y, median)
    # Check startvalue is not the solution
    y = 2.5
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check startvalue is not the solution but element of X
    y = 3.
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check that a single vector is identity
    X = np.array([1., 2., 3.]).reshape(1, 3)
    y = X[0, ]
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
    """Modified Weiszfeld step in 2d: known first iterates and a fixed point."""
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    y = np.array([0.5, 0.5])
    # Check first two iterations
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
    new_y = _modified_weiszfeld_step(X, new_y)
    assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
    # Check fix point
    y = np.array([0.21132505, 0.78867497])
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
    """In 1d the spatial median coincides with the ordinary median."""
    X = np.array([1., 2., 3.]).reshape(3, 1)
    _, median = _spatial_median(X)
    assert_array_almost_equal(median, 2.)
    # Larger random 1d problem: the 1d solution is exact.
    rng = np.random.RandomState(0)
    X = rng.randint(100, size=(1000, 1))
    _, median = _spatial_median(X)
    assert_array_equal(median, np.median(X.ravel()))
def test_spatial_median_2d():
    """Spatial median solves Fermat-Weber; too few iterations warns."""
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def sum_of_distances(point):
        # Fermat-Weber objective: total euclidean distance to all samples.
        return np.sum([norm(sample - point) for sample in X])

    # The estimate must agree with a direct BFGS minimisation.
    reference = fmin_bfgs(sum_of_distances, median, disp=False)
    assert_array_almost_equal(median, reference)
    # Exceeding max_iter must emit a ConvergenceWarning.
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
    """Theil-Sen recovers the 1d model where outliers break OLS."""
    X, y, w, c = gen_toy_problem_1d()
    # Ordinary least squares is thrown far off by the outliers.
    ols = LinearRegression().fit(X, y)
    assert_greater(np.abs(ols.coef_ - w), 0.9)
    # Theil-Sen stays close to the true slope and intercept.
    estimator = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
    """Without an intercept the combined offset w + c must be recovered."""
    X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Least squares without intercept misses the true coefficient.
    ols = LinearRegression(fit_intercept=False).fit(X, y)
    assert_greater(np.abs(ols.coef_ - w - c), 0.5)
    # Theil-Sen absorbs the offset into the slope; intercept stays 0.
    estimator = TheilSenRegressor(fit_intercept=False,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w + c, 1)
    assert_almost_equal(estimator.intercept_, 0.)
def test_theil_sen_2d():
    """Theil-Sen recovers the 2d model despite outliers that break OLS."""
    X, y, w, c = gen_toy_problem_2d()
    # Least squares is spoiled by the outliers.
    ols = LinearRegression().fit(X, y)
    assert_greater(norm(ols.coef_ - w), 1.0)
    # Theil-Sen with a capped subpopulation still works.
    estimator = TheilSenRegressor(max_subpopulation=1e3,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_calc_breakdown_point():
    """For huge n and n_subsamples=2 the breakdown point -> 1 - 1/sqrt(2)."""
    expected = 1. - 1. / np.sqrt(2.)
    actual = _breakdown_point(1e10, 2)
    assert_less(np.abs(actual - expected), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
    """A negative max_subpopulation must be rejected at fit time."""
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
    """n_subsamples=1 is too few for this problem and must raise."""
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
    """An excessive n_subsamples (101) for this problem must raise."""
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
    """With n_samples < n_features, n_subsamples below n_samples must raise."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
    """Random subpopulation sampling still recovers the 4d model."""
    X, y, w, c = gen_toy_problem_4d()
    estimator = TheilSenRegressor(max_subpopulation=250,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_subsamples():
    """Using all samples as one subsample reduces Theil-Sen to OLS."""
    X, y, w, c = gen_toy_problem_4d()
    estimator = TheilSenRegressor(n_subsamples=X.shape[0],
                                  random_state=0).fit(X, y)
    ols = LinearRegression().fit(X, y)
    # The coefficients must match least squares almost exactly.
    assert_array_almost_equal(estimator.coef_, ols.coef_, 9)
def test_verbosity():
    """verbose=True must run without errors (output is suppressed)."""
    X, y, _, _ = gen_toy_problem_1d()
    with no_stdout_stderr():
        TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
        TheilSenRegressor(verbose=True,
                          max_subpopulation=10,
                          random_state=0).fit(X, y)
def test_theil_sen_parallel():
    """Parallel fitting (n_jobs=-1) still recovers the 2d model."""
    X, y, w, c = gen_toy_problem_2d()
    # Least squares is spoiled by the outliers.
    ols = LinearRegression().fit(X, y)
    assert_greater(norm(ols.coef_ - w), 1.0)
    estimator = TheilSenRegressor(n_jobs=-1,
                                  random_state=0,
                                  max_subpopulation=2e3).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_less_samples_than_features():
    """n_samples < n_features: Theil-Sen falls back to least squares."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    # Without an intercept the fallback is exactly the OLS solution.
    theil_sen = TheilSenRegressor(fit_intercept=False,
                                  random_state=0).fit(X, y)
    ols = LinearRegression(fit_intercept=False).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, ols.coef_, 12)
    # With an intercept the two solutions differ (the intercept is
    # computed differently), but the fit must still interpolate y.
    theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.predict(X), y, 12)
| bsd-3-clause |
imk1/IMKTFBindingCode | runRandomForestClassification.py | 1 | 2267 | def makeRandomForestClassificationInputs(positivesFileName, negativesFileName, validPositivesFileName, validNegativesFileName):
    # Make inputs for a random forest classifier: load four
    # numeric feature files with np.loadtxt and stack them into training
    # and validation matrices, positives first, with 1/0 label vectors.
    # Returns [trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec].
    # ASSUMES THAT FEATURES ARE SORTED IN THE SAME WAY IN ALL FILES
    positivesFeatureMat = np.loadtxt(positivesFileName)
    negativesFeatureMat = np.loadtxt(negativesFileName)
    trainFeatureMat = np.concatenate((positivesFeatureMat, negativesFeatureMat), axis = 0)
    positivesValidFeatureMat = np.loadtxt(validPositivesFileName)
    negativesValidFeatureMat = np.loadtxt(validNegativesFileName)
    validFeatureMat = np.concatenate((positivesValidFeatureMat, negativesValidFeatureMat), axis = 0)
    # Labels: 1 for every positive row, 0 for every negative row,
    # matching the row order of the concatenations above.
    trainLabelsVec = np.concatenate((np.ones(positivesFeatureMat.shape[0]), np.zeros(negativesFeatureMat.shape[0])))
    validLabelsVec = np.concatenate((np.ones(positivesValidFeatureMat.shape[0]), np.zeros(negativesValidFeatureMat.shape[0])))
    return [trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec]
def runRandomForestClassification(trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec):
    # Fit a random forest classifier and print train/validation accuracy.
    # NOTE(review): Python 2 print statements -- not Python 3 compatible.
    print "Running classifier!"
    # Fixed hyper-parameters; max_features=0.05 uses 5% of features per split.
    model = RandomForestClassifier(n_estimators=100, criterion="gini", max_features=0.05, max_depth=10, min_samples_split=20, n_jobs=1)
    model.fit(trainFeatureMat, trainLabelsVec)
    trainingAccuracy = model.score(trainFeatureMat, trainLabelsVec)
    validationAccuracy = model.score(validFeatureMat, validLabelsVec)
    print trainingAccuracy
    print validationAccuracy
    # The fitted model is currently discarded; uncomment to return it.
    #return model
if __name__=="__main__":
    import sys
    import random
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    # Command line: train positives, train negatives, validation
    # positives, validation negatives (each a np.loadtxt-readable file).
    positivesFileName = sys.argv[1]
    negativesFileName = sys.argv[2]
    validPositivesFileName = sys.argv[3]
    validNegativesFileName = sys.argv[4]
    #outputFileName = sys.argv[5]
    [trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec] = makeRandomForestClassificationInputs(positivesFileName, negativesFileName, validPositivesFileName, validNegativesFileName)
    #model = runExtraTreesClassification(trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec)
    runRandomForestClassification(trainFeatureMat, validFeatureMat, trainLabelsVec, validLabelsVec)
| mit |
lheagy/simpegem | simpegEM/Utils/SrcUtils.py | 2 | 7432 | from SimPEG import *
from scipy.special import ellipk, ellipe
from scipy.constants import mu_0, pi
def MagneticDipoleVectorPotential(srcLoc, obsLoc, component, moment=1., dipoleMoment=(0., 0., 1.), mu = mu_0):
    """
    Calculate the vector potential of a set of magnetic dipoles
    at given locations 'ref. <http://en.wikipedia.org/wiki/Dipole#Magnetic_vector_potential>'
    :param numpy.ndarray srcLoc: Location of the source(s) (x, y, z)
    :param numpy.ndarray,SimPEG.Mesh obsLoc: Where the potentials will be calculated (x, y, z) or a SimPEG Mesh
    :param str,list component: The component to calculate - 'x', 'y', or 'z' if an array, or grid type if mesh, can be a list
    :param numpy.ndarray dipoleMoment: The vector dipole moment
    :rtype: numpy.ndarray
    :return: The vector potential each dipole at each observation location

    NOTE(review): the `moment` argument is accepted but never used in the
    computation below -- confirm whether it should scale the result.
    """
    #TODO: break this out!
    # A list of components: evaluate each recursively and stack results.
    if type(component) in [list, tuple]:
        # NOTE(review): Python 2 idiom -- `range` must yield a list for
        # the item assignment below to work (fails on Python 3).
        out = range(len(component))
        for i, comp in enumerate(component):
            out[i] = MagneticDipoleVectorPotential(srcLoc, obsLoc, comp, dipoleMoment=dipoleMoment)
        return np.concatenate(out)
    # A mesh was passed: recurse on the matching edge/face grid and the
    # spatial component encoded by the second character ('x'/'y'/'z').
    if isinstance(obsLoc, Mesh.BaseMesh):
        mesh = obsLoc
        assert component in ['Ex','Ey','Ez','Fx','Fy','Fz'], "Components must be in: ['Ex','Ey','Ez','Fx','Fy','Fz']"
        return MagneticDipoleVectorPotential(srcLoc, getattr(mesh,'grid'+component), component[1], dipoleMoment=dipoleMoment)
    # Map the requested component to a column index.
    if component == 'x':
        dimInd = 0
    elif component == 'y':
        dimInd = 1
    elif component == 'z':
        dimInd = 2
    else:
        raise ValueError('Invalid component')
    srcLoc = np.atleast_2d(srcLoc)
    obsLoc = np.atleast_2d(obsLoc)
    dipoleMoment = np.atleast_2d(dipoleMoment)
    nEdges = obsLoc.shape[0]
    nSrc = srcLoc.shape[0]
    # Broadcast the dipole moment to one row per observation point.
    m = np.array(dipoleMoment).repeat(nEdges, axis=0)
    A = np.empty((nEdges, nSrc))
    for i in range(nSrc):
        dR = obsLoc - srcLoc[i, np.newaxis].repeat(nEdges, axis=0)
        mCr = np.cross(m, dR)
        r = np.sqrt((dR**2).sum(axis=1))
        # A = mu/(4*pi) * (m x r) / |r|^3, one column per source.
        A[:, i] = +(mu/(4*pi)) * mCr[:,dimInd]/(r**3)
    # Single source: return a flat vector instead of an (nEdges, 1) array.
    if nSrc == 1:
        return A.flatten()
    return A
def MagneticDipoleFields(srcLoc, obsLoc, component, moment=1., mu = mu_0):
    """Evaluate one B-field component of vertical magnetic dipole(s).

    :param numpy.ndarray srcLoc: source location(s) (x, y, z)
    :param numpy.ndarray obsLoc: observation locations (x, y, z)
    :param str component: field component to evaluate: 'x', 'y' or 'z'
    :param numpy.ndarray moment: dipole moment. NOTE(review): broadcast
        below but never used in the field formula -- confirm intended.
    :rtype: numpy.ndarray
    :return: requested B component at every observation point, one
        column per source (flattened when there is a single source)
    """
    axis_of = {'x': 0, 'y': 1, 'z': 2}
    if component not in axis_of:
        raise ValueError('Invalid component')
    axis = axis_of[component]

    srcLoc = np.atleast_2d(srcLoc)
    obsLoc = np.atleast_2d(obsLoc)
    moment = np.atleast_2d(moment)
    n_obs = obsLoc.shape[0]
    n_src = srcLoc.shape[0]
    # Kept for parity with the original implementation: the moment is
    # broadcast but does not enter the expression below.
    np.array(moment).repeat(n_obs, axis=0)

    B = np.empty((n_obs, n_src))
    for j in range(n_src):
        dR = obsLoc - srcLoc[j, np.newaxis].repeat(n_obs, axis=0)
        r = np.sqrt((dR**2).sum(axis=1))
        # B_i = mu/(4 pi r^3) * (3 * dz * d_i / r^2 - delta_{i,z})
        B[:, j] = (mu/(4*pi)) / (r**3) * (
            3*dR[:, 2]*dR[:, axis]/r**2 - (1. if axis == 2 else 0.))
    if n_src == 1:
        return B.flatten()
    return B
def MagneticLoopVectorPotential(srcLoc, obsLoc, component, radius, mu=mu_0):
    """
    Calculate the vector potential of a horizontal circular loop
    (unit current) at given locations.
    :param numpy.ndarray srcLoc: Location of the source(s) (x, y, z)
    :param numpy.ndarray,SimPEG.Mesh obsLoc: Where the potentials will be calculated (x, y, z) or a SimPEG Mesh
    :param str,list component: The component to calculate - 'x', 'y', or 'z' if an array, or grid type if mesh, can be a list
    :param numpy.ndarray radius: radius of the loop
    :rtype: numpy.ndarray
    :return: The vector potential of the loop at each observation location

    NOTE(review): the `mu` argument is not used below -- the factor 4e-7
    hard-codes mu = 4*pi*1e-7 (see the "Common factor" comment).
    """
    # A list of components: evaluate each recursively and stack results.
    if type(component) in [list, tuple]:
        # NOTE(review): Python 2 idiom -- `range` must yield a list for
        # the item assignment below to work (fails on Python 3).
        out = range(len(component))
        for i, comp in enumerate(component):
            out[i] = MagneticLoopVectorPotential(srcLoc, obsLoc, comp, radius, mu)
        return np.concatenate(out)
    # A mesh was passed: recurse on the matching edge/face grid.
    if isinstance(obsLoc, Mesh.BaseMesh):
        mesh = obsLoc
        assert component in ['Ex','Ey','Ez','Fx','Fy','Fz'], "Components must be in: ['Ex','Ey','Ez','Fx','Fy','Fz']"
        return MagneticLoopVectorPotential(srcLoc, getattr(mesh,'grid'+component), component[1], radius, mu)
    srcLoc = np.atleast_2d(srcLoc)
    obsLoc = np.atleast_2d(obsLoc)
    n = obsLoc.shape[0]
    nSrc = srcLoc.shape[0]
    if component=='z':
        # A horizontal loop has a purely azimuthal potential, so A_z = 0.
        A = np.zeros((n, nSrc))
        if nSrc ==1:
            return A.flatten()
        return A
    else:
        A = np.zeros((n, nSrc))
        for i in range (nSrc):
            # Cylindrical coordinates of each observation point
            # relative to source i.
            x = obsLoc[:, 0] - srcLoc[i, 0]
            y = obsLoc[:, 1] - srcLoc[i, 1]
            z = obsLoc[:, 2] - srcLoc[i, 2]
            r = np.sqrt(x**2 + y**2)
            # Argument of the complete elliptic integrals in the
            # standard circular-loop solution.
            m = (4 * radius * r) / ((radius + r)**2 + z**2)
            m[m > 1.] = 1.
            # m might be slightly larger than 1 due to rounding errors
            # but ellipke requires 0 <= m <= 1
            K = ellipk(m)
            E = ellipe(m)
            ind = (r > 0) & (m < 1)
            # % 1/r singular at r = 0 and K(m) singular at m = 1
            Aphi = np.zeros(n)
            # % Common factor is (mu * I) / pi with I = 1 and mu = 4e-7 * pi.
            Aphi[ind] = 4e-7 / np.sqrt(m[ind]) * np.sqrt(radius / r[ind]) *((1. - m[ind] / 2.) * K[ind] - E[ind])
            # Project the azimuthal potential onto the requested axis.
            if component == 'x':
                A[ind, i] = Aphi[ind] * (-y[ind] / r[ind] )
            elif component == 'y':
                A[ind, i] = Aphi[ind] * ( x[ind] / r[ind] )
            else:
                raise ValueError('Invalid component')
        # Single source: return a flat vector.
        if nSrc == 1:
            return A.flatten()
        return A
if __name__ == '__main__':
    # Quick visual sanity check: build a tensor mesh, evaluate the loop
    # vector potential A on its edges, then plot B = curl(A) face by face.
    from SimPEG import Mesh
    import matplotlib.pyplot as plt
    cs = 20
    ncx, ncy, ncz = 41, 41, 40
    # Uniform cell widths in each direction.
    hx = np.ones(ncx)*cs
    hy = np.ones(ncy)*cs
    hz = np.ones(ncz)*cs
    mesh = Mesh.TensorMesh([hx, hy, hz], 'CCC')
    srcLoc = np.r_[0., 0., 0.]
    # Loop of radius 200 centred at the origin, one call per edge grid.
    Ax = MagneticLoopVectorPotential(srcLoc, mesh.gridEx, 'x', 200)
    Ay = MagneticLoopVectorPotential(srcLoc, mesh.gridEy, 'y', 200)
    Az = MagneticLoopVectorPotential(srcLoc, mesh.gridEz, 'z', 200)
    A = np.r_[Ax, Ay, Az]
    B0 = mesh.edgeCurl*A
    J0 = mesh.edgeCurl.T*B0
    # mesh.plotImage(A, vType = 'Ex')
    # mesh.plotImage(A, vType = 'Ey')
    mesh.plotImage(B0, vType = 'Fx')
    mesh.plotImage(B0, vType = 'Fy')
    mesh.plotImage(B0, vType = 'Fz')
    # # mesh.plotImage(J0, vType = 'Ex')
    # mesh.plotImage(J0, vType = 'Ey')
    # mesh.plotImage(J0, vType = 'Ez')
    plt.show()
| mit |
luca76/QGIS | python/plugins/processing/algs/qgis/VectorLayerHistogram.py | 6 | 2835 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterTableField import ParameterTableField
from processing.parameters.ParameterNumber import ParameterNumber
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
class VectorLayerHistogram(GeoAlgorithm):
    """Processing algorithm: histogram of a numeric attribute of a vector layer.

    Renders the histogram with matplotlib, saves it as a PNG next to the
    requested HTML output, and writes a minimal HTML page embedding it.
    """

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    FIELD = 'FIELD'
    BINS = 'BINS'

    def processAlgorithm(self, progress):
        # Resolve the input layer and the attribute to plot.
        uri = self.getParameterValue(self.INPUT)
        layer = getObjectFromUri(uri)
        fieldname = self.getParameterValue(self.FIELD)
        output = self.getOutputValue(self.OUTPUT)
        values = vector.getAttributeValues(layer, fieldname)
        # Close any previous figure so state does not leak between runs.
        plt.close()
        bins = self.getParameterValue(self.BINS)
        plt.hist(values[fieldname], bins)
        plotFilename = output + '.png'
        lab.savefig(plotFilename)
        # Use a context manager so the file handle is released even if
        # the write fails (the original closed it unconditionally, which
        # was not exception-safe).
        with open(output, 'w') as f:
            f.write('<img src="' + plotFilename + '"/>')

    def defineCharacteristics(self):
        # Name/group shown in the Processing toolbox, then the
        # input/output declarations.
        self.name = 'Vector layer histogram'
        self.group = 'Graphics'
        self.addParameter(ParameterVector(self.INPUT, 'Input layer',
                          [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterTableField(self.FIELD, 'Attribute',
                          self.INPUT, ParameterTableField.DATA_TYPE_NUMBER))
        self.addParameter(ParameterNumber(self.BINS, 'number of bins', 2,
                          None, 10))
        self.addOutput(OutputHTML(self.OUTPUT, 'Output'))
YinongLong/scikit-learn | sklearn/cross_validation.py | 11 | 69870 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .utils.random import choice
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
# Warn eagerly on import: this whole module is deprecated in favour of
# sklearn.model_selection (which has a different CV iterator interface)
# and is scheduled for removal in 0.20.
warnings.warn("This module was deprecated in version 0.18 in favor of the "
              "model_selection module into which all the refactored classes "
              "and functions are moved. Also note that the interface of the "
              "new CV iterators are different from that of this module. "
              "This module will be removed in 0.20.", DeprecationWarning)

# Public API of this module.
__all__ = ['KFold',
           'LabelKFold',
           'LeaveOneLabelOut',
           'LeaveOneOut',
           'LeavePLabelOut',
           'LeavePOut',
           'ShuffleSplit',
           'StratifiedKFold',
           'StratifiedShuffleSplit',
           'PredefinedSplit',
           'LabelShuffleSplit',
           'check_cv',
           'cross_val_score',
           'cross_val_predict',
           'permutation_test_score',
           'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
    """Base class for CV iterators where train_mask = ~test_mask
    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    """

    def __init__(self, n):
        # Accept integral floats (e.g. n=4.0) but reject real fractions.
        if abs(n - int(n)) >= np.finfo('f').eps:
            raise ValueError("n must be an integer")
        self.n = int(n)

    def __iter__(self):
        """Yield (train_indices, test_indices) pairs, one per split."""
        ind = np.arange(self.n)
        for test_index in self._iter_test_masks():
            # Train set is the complement of each boolean test mask.
            train_index = np.logical_not(test_index)
            train_index = ind[train_index]
            test_index = ind[test_index]
            yield train_index, test_index

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets.
        By default, delegates to _iter_test_indices()
        """
        for test_index in self._iter_test_indices():
            test_mask = self._empty_mask()
            test_mask[test_index] = True
            yield test_mask

    def _iter_test_indices(self):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError

    def _empty_mask(self):
        # Use the builtin `bool` dtype: `np.bool` was merely a deprecated
        # alias of the builtin (identical behavior) and has been removed
        # in modern NumPy releases.
        return np.zeros(self.n, dtype=bool)
class LeaveOneOut(_PartitionIterator):
    """Leave-One-Out cross validation iterator.

    Each of the n samples is used once as a singleton test set while the
    remaining n - 1 samples form the training set. Equivalent to
    ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``.

    With as many splits as samples this method can be very costly; for
    large datasets prefer KFold, StratifiedKFold or ShuffleSplit.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def _iter_test_indices(self):
        # Split i tests exactly sample i.
        return range(self.n)

    def __repr__(self):
        return '%s.%s(n=%i)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
        )

    def __len__(self):
        # One split per sample.
        return self.n
class LeavePOut(_PartitionIterator):
    """Leave-P-Out cross validation iterator.

    Tests on every distinct subset of size p while the remaining n - p
    samples form the training set. Note that ``LeavePOut(n, p)`` is NOT
    equivalent to ``KFold(n, n_folds=n // p)``, which builds
    non-overlapping test sets.

    The number of splits grows combinatorially with n, so this can be
    very costly; for large datasets prefer KFold, StratifiedKFold or
    ShuffleSplit.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    p : int
        Size of the test sets.
    """

    def __init__(self, n, p):
        super(LeavePOut, self).__init__(n)
        self.p = p

    def _iter_test_indices(self):
        # Every size-p subset of {0, ..., n-1}, in lexicographic order.
        for subset in combinations(range(self.n), self.p):
            yield np.array(subset)

    def __repr__(self):
        return '%s.%s(n=%i, p=%i)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.p,
        )

    def __len__(self):
        # Binomial coefficient C(n, p).
        return int(factorial(self.n) / factorial(self.n - self.p)
                   / factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
    """Base class validating the constructor arguments shared by the
    KFold family of iterators."""

    @abstractmethod
    def __init__(self, n, n_folds, shuffle, random_state):
        super(_BaseKFold, self).__init__(n)
        # Accept integral floats for n_folds, reject real fractions.
        if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
            raise ValueError("n_folds must be an integer")
        self.n_folds = n_folds = int(n_folds)
        # At least two folds are required to form a train/test split.
        if n_folds <= 1:
            raise ValueError(
                "k-fold cross validation requires at least one"
                " train / test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        # There cannot be more folds than samples.
        if n_folds > self.n:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(n_folds, n))
        # Reject truthy non-booleans (e.g. shuffle=1) explicitly.
        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.shuffle = shuffle
        self.random_state = random_state
class KFold(_BaseKFold):
    """K-Folds cross validation iterator.

    Splits the dataset into k consecutive folds (no shuffling by
    default); each fold serves once as the test set while the k - 1
    remaining folds form the training set.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements.
    n_folds : int, default=3
        Number of folds. Must be at least 2.
    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.
    random_state : None, int or RandomState
        When shuffle=True, pseudo-random number generator state used for
        shuffling. If None, use default numpy RNG for shuffling.

    Notes
    -----
    The first n % n_folds folds have size n // n_folds + 1, other folds
    have size n // n_folds.

    See also
    --------
    StratifiedKFold take label information into account to avoid
    building folds with imbalanced class distributions (for binary or
    multiclass classification tasks).
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, n, n_folds=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n, n_folds, shuffle, random_state)
        self.idxs = np.arange(n)
        if shuffle:
            check_random_state(self.random_state).shuffle(self.idxs)

    def _iter_test_indices(self):
        # The first n % n_folds folds get one extra element.
        base_size, n_larger = divmod(self.n, self.n_folds)
        stop = 0
        for fold in range(self.n_folds):
            start = stop
            stop = start + base_size + (1 if fold < n_larger else 0)
            yield self.idxs[start:stop]

    def __repr__(self):
        return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LabelKFold(_BaseKFold):
    """K-fold iterator variant with non-overlapping labels.

    The same label never appears in two different folds, so there must
    be at least as many distinct labels as folds. Folds are balanced
    greedily so that each holds approximately the same number of
    distinct labels' samples.

    .. versionadded:: 0.17

    Parameters
    ----------
    labels : array-like with shape (n_samples, )
        Contains a label for each sample. The folds are built so that
        the same label does not appear in two different folds.
    n_folds : int, default=3
        Number of folds. Must be at least 2.

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def __init__(self, labels, n_folds=3):
        super(LabelKFold, self).__init__(len(labels), n_folds,
                                         shuffle=False, random_state=None)
        # Encode labels as integers 0..n_labels-1.
        unique_labels, labels = np.unique(labels, return_inverse=True)
        n_labels = len(unique_labels)
        if n_folds > n_labels:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of labels: {1}.").format(n_folds,
                                                            n_labels))
        # Greedy bin packing: hand out labels from largest to smallest,
        # always into the currently lightest fold.
        samples_per_label = np.bincount(labels)
        by_size_desc = np.argsort(samples_per_label)[::-1]
        samples_per_label = samples_per_label[by_size_desc]
        fold_weights = np.zeros(n_folds)
        label_to_fold = np.zeros(len(unique_labels))
        for rank, weight in enumerate(samples_per_label):
            lightest = np.argmin(fold_weights)
            fold_weights[lightest] += weight
            label_to_fold[by_size_desc[rank]] = lightest
        # Fold id of every sample, looked up through its encoded label.
        self.idxs = label_to_fold[labels]

    def _iter_test_indices(self):
        for fold in range(self.n_folds):
            yield np.where(self.idxs == fold)[0]

    def __repr__(self):
        # NOTE: self.n (set by _BaseKFold) is the number of samples.
        return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
        )

    def __len__(self):
        return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds), the last one has the
complementary.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
    """Number of splitting iterations, i.e. the number of folds."""
    return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label_Out cross-validation iterator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 1, 2])
    >>> labels = np.array([1, 1, 2, 2])
    >>> lol = cross_validation.LeaveOneLabelOut(labels)
    >>> len(lol)
    2
    >>> print(lol)
    sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
    >>> for train_index, test_index in lol:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2 3] TEST: [0 1]
    [[5 6]
     [7 8]] [[1 2]
     [3 4]] [1 2] [1 2]
    TRAIN: [0 1] TEST: [2 3]
    [[1 2]
     [3 4]] [[5 6]
     [7 8]] [1 2] [1 2]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, labels):
        super(LeaveOneLabelOut, self).__init__(len(labels))
        # Keep a private copy so callers mutating ``labels`` afterwards
        # cannot change the generated splits.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(self.labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        # One split per distinct label value: that label's samples form
        # the test set, everything else the training set.
        for label in self.unique_labels:
            yield self.labels == label

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(labels=%s)' % (cls.__module__, cls.__name__,
                                     self.labels)

    def __len__(self):
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-Label_Out cross-validation iterator

    Provides train/test indices to split data according to a third-party
    provided label. This label information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels while the latter uses samples
    all assigned the same labels.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.

    p : int
        Number of samples to leave out in the test split.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> labels = np.array([1, 2, 3])
    >>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
    >>> len(lpl)
    3
    >>> print(lpl)
    sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
    >>> for train_index, test_index in lpl:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    ...    print(X_train, X_test, y_train, y_test)
    TRAIN: [2] TEST: [0 1]
    [[5 6]] [[1 2]
     [3 4]] [1] [1 2]
    TRAIN: [1] TEST: [0 2]
    [[3 4]] [[1 2]
     [5 6]] [2] [1 1]
    TRAIN: [0] TEST: [1 2]
    [[1 2]] [[3 4]
     [5 6]] [1] [2 1]

    See also
    --------
    LabelKFold: K-fold iterator variant with non-overlapping labels.
    """

    def __init__(self, labels, p):
        super(LeavePLabelOut, self).__init__(len(labels))
        # Keep a private copy so callers mutating ``labels`` afterwards
        # cannot change the generated splits.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(self.labels)
        self.n_unique_labels = len(self.unique_labels)
        self.p = p

    def _iter_test_masks(self):
        # Every subset of ``p`` distinct labels defines one split: the
        # samples carrying any label of the subset form the test set.
        for subset in combinations(range(self.n_unique_labels), self.p):
            mask = self._empty_mask()
            for label in self.unique_labels[np.array(subset)]:
                mask[self.labels == label] = True
            yield mask

    def __repr__(self):
        cls = self.__class__
        return '%s.%s(labels=%s, p=%s)' % (cls.__module__, cls.__name__,
                                           self.labels, self.p)

    def __len__(self):
        # Number of splits is "n_unique_labels choose p"; keep the exact
        # original division order for bit-for-bit identical results.
        return int(factorial(self.n_unique_labels) /
                   factorial(self.n_unique_labels - self.p) /
                   factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        # Resolve the (possibly fractional) sizes into absolute sample
        # counts once, raising early on inconsistent settings.
        self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
                                                            train_size)

    def __iter__(self):
        # Delegate to the subclass-specific index generator.
        for train, test in self._iter_indices():
            yield train, test

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validation iterator.

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn import cross_validation
    >>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
    ...     test_size=.25, random_state=0)
    >>> len(rs)
    3
    >>> print(rs)
    ... # doctest: +ELLIPSIS
    ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
    >>> for train_index, test_index in rs:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...
    TRAIN: [3 1 0] TEST: [2]
    TRAIN: [2 1 3] TEST: [0]
    TRAIN: [0 2 1] TEST: [3]
    >>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
    ...     train_size=0.5, test_size=.25, random_state=0)
    >>> for train_index, test_index in rs:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...
    TRAIN: [3 1] TEST: [2]
    TRAIN: [2 1] TEST: [0]
    TRAIN: [0 2] TEST: [3]
    """

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # Random partition: the head of a fresh permutation is the
            # test set, the next ``n_train`` entries are the train set.
            permutation = rng.permutation(self.n)
            ind_test = permutation[:self.n_test]
            ind_train = permutation[self.n_test:self.n_test + self.n_train]
            yield ind_train, ind_test

    def __repr__(self):
        return '%s(%d, n_iter=%d, test_size=%s, random_state=%s)' % (
            self.__class__.__name__, self.n, self.n_iter,
            str(self.test_size), self.random_state)

    def __len__(self):
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
    """Resolve ``test_size``/``train_size`` into absolute sample counts.

    Parameters
    ----------
    n : int
        Total number of samples.
    test_size : float, int, or None
        Fraction (float in [0, 1)) or absolute number (int) of test
        samples; ``None`` means "complement of ``train_size``".
    train_size : float, int, or None
        Same convention as ``test_size``, for the training samples.

    Returns
    -------
    n_train, n_test : int
        Absolute numbers of train and test samples.

    Raises
    ------
    ValueError
        If both sizes are ``None``, either is out of range, of an
        unsupported type, or the two together exceed the data size.
    """
    if test_size is None and train_size is None:
        raise ValueError(
            'test_size and train_size can not both be None')

    # ``dtype.kind`` separates floats ('f') from integers ('i'); ``None``
    # maps to kind 'O' and therefore falls through every branch below.
    test_kind = np.asarray(test_size).dtype.kind
    train_kind = np.asarray(train_size).dtype.kind

    if test_size is not None:
        if test_kind == 'f':
            if test_size >= 1.:
                raise ValueError(
                    'test_size=%f should be smaller '
                    'than 1.0 or be an integer' % test_size)
        elif test_kind == 'i':
            if test_size >= n:
                raise ValueError(
                    'test_size=%d should be smaller '
                    'than the number of samples %d' % (test_size, n))
        else:
            raise ValueError("Invalid value for test_size: %r" % test_size)

    if train_size is not None:
        if train_kind == 'f':
            if train_size >= 1.:
                raise ValueError("train_size=%f should be smaller "
                                 "than 1.0 or be an integer" % train_size)
            elif test_kind == 'f' and train_size + test_size > 1.:
                raise ValueError('The sum of test_size and train_size = %f, '
                                 'should be smaller than 1.0. Reduce '
                                 'test_size and/or train_size.' %
                                 (train_size + test_size))
        elif train_kind == 'i':
            if train_size >= n:
                raise ValueError("train_size=%d should be smaller "
                                 "than the number of samples %d" %
                                 (train_size, n))
        else:
            raise ValueError("Invalid value for train_size: %r" % train_size)

    # Convert fractions to counts; integer inputs pass through as floats
    # and are truncated by the final int() conversion.
    if test_kind == 'f':
        n_test = ceil(test_size * n)
    elif test_kind == 'i':
        n_test = float(test_size)

    if train_size is None:
        n_train = n - n_test
    else:
        if train_kind == 'f':
            n_train = floor(train_size * n)
        else:
            n_train = float(train_size)

    if test_size is None:
        n_test = n - n_train

    if n_train + n_test > n:
        raise ValueError('The sum of train_size and test_size = %d, '
                         'should be smaller than the number of '
                         'samples %d. Reduce test_size and/or '
                         'train_size.' % (n_train + n_test, n))

    return int(n_train), int(n_test)
def _approximate_mode(class_counts, n_draws, rng):
    """Computes approximate mode of multivariate hypergeometric.

    This is an approximation to the mode of the multivariate
    hypergeometric given by class_counts and n_draws.
    It shouldn't be off by more than one.

    It is the mostly likely outcome of drawing n_draws many
    samples from the population given by class_counts.

    Parameters
    ----------
    class_counts : ndarray of int
        Population per class.
    n_draws : int
        Number of draws (samples to draw) from the overall population.
    rng : random state
        Used to break ties.

    Returns
    -------
    sampled_classes : ndarray of int
        Number of samples drawn from each class.
        np.sum(sampled_classes) == n_draws
    """
    # this computes a bad approximation to the mode of the
    # multivariate hypergeometric given by class_counts and n_draws
    continuous = n_draws * class_counts / class_counts.sum()
    # floored means we don't overshoot n_samples, but probably undershoot
    floored = np.floor(continuous)
    # we add samples according to how much "left over" probability
    # they had, until we arrive at n_samples
    need_to_add = int(n_draws - floored.sum())
    if need_to_add > 0:
        remainder = continuous - floored
        values = np.sort(np.unique(remainder))[::-1]
        # add according to remainder, but break ties
        # randomly to avoid biases
        for value in values:
            inds, = np.where(remainder == value)
            # if we need_to_add less than what's in inds
            # we draw randomly from them.
            # if we need to add more, we add them all and
            # go to the next value
            add_now = min(len(inds), need_to_add)
            inds = choice(inds, size=add_now, replace=False, random_state=rng)
            floored[inds] += 1
            need_to_add -= add_now
            if need_to_add == 0:
                break
    # ``np.int`` was only a deprecated alias of the builtin ``int``
    # (removed in NumPy 1.24); use the builtin directly — same dtype.
    return floored.astype(int)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    y : array, [n_samples]
        Labels of samples.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Examples
    --------
    >>> from sklearn.cross_validation import StratifiedShuffleSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
    >>> len(sss)
    3
    >>> print(sss)       # doctest: +ELLIPSIS
    StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
    >>> for train_index, test_index in sss:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2] TEST: [3 0]
    TRAIN: [0 2] TEST: [1 3]
    TRAIN: [0 2] TEST: [3 1]
    """

    def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, random_state)
        self.y = np.array(y)
        self.classes, self.y_indices = np.unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]
        # Every class must occur at least twice so both sides of a split
        # can be stratified.
        if np.min(bincount(self.y_indices)) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")
        if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_train, n_cls))
        if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_test, n_cls))

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        cls_count = bincount(self.y_indices)
        for _ in range(self.n_iter):
            # if there are ties in the class-counts, we want
            # to make sure to break them anew in each iteration
            n_i = _approximate_mode(cls_count, self.n_train, rng)
            class_counts_remaining = cls_count - n_i
            t_i = _approximate_mode(class_counts_remaining, self.n_test, rng)

            train = []
            test = []

            # NOTE: the order of rng calls below is preserved exactly so
            # that identical seeds keep producing identical splits.
            for i, _ in enumerate(self.classes):
                permutation = rng.permutation(cls_count[i])
                perm_indices_class_i = np.where(
                    (i == self.y_indices))[0][permutation]
                train.extend(perm_indices_class_i[:n_i[i]])
                test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
            train = rng.permutation(train)
            test = rng.permutation(test)

            yield train, test

    def __repr__(self):
        return '%s(labels=%s, n_iter=%d, test_size=%s, random_state=%s)' % (
            self.__class__.__name__, self.y, self.n_iter,
            str(self.test_size), self.random_state)

    def __len__(self):
        return self.n_iter
class PredefinedSplit(_PartitionIterator):
    """Predefined split cross validation iterator

    Splits the data into training/test set folds according to a predefined
    scheme. Each sample can be assigned to at most one test set fold, as
    specified by the user through the ``test_fold`` parameter.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    test_fold : array-like, shape (n_samples,)
        test_fold[i] gives the test set fold of sample i. A value of -1
        indicates that the corresponding sample is not part of any test set
        folds, but will instead always be put into the training fold.

    Examples
    --------
    >>> from sklearn.cross_validation import PredefinedSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
    >>> len(ps)
    2
    >>> print(ps)       # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    sklearn.cross_validation.PredefinedSplit(test_fold=[ 0  1 -1  1])
    >>> for train_index, test_index in ps:
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [1 2 3] TEST: [0]
    TRAIN: [0 2] TEST: [1 3]
    """

    def __init__(self, test_fold):
        super(PredefinedSplit, self).__init__(len(test_fold))
        # ``np.int`` was only a deprecated alias of the builtin ``int``
        # (removed in NumPy 1.24); the builtin yields the same dtype.
        self.test_fold = np.array(test_fold, dtype=int)
        self.test_fold = column_or_1d(self.test_fold)
        # -1 means "never in a test set": it is not a fold of its own.
        self.unique_folds = np.unique(self.test_fold)
        self.unique_folds = self.unique_folds[self.unique_folds != -1]

    def _iter_test_indices(self):
        for f in self.unique_folds:
            yield np.where(self.test_fold == f)[0]

    def __repr__(self):
        return '%s.%s(test_fold=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.test_fold)

    def __len__(self):
        return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
    """Shuffle-Labels-Out cross-validation iterator

    Provides randomized train/test indices to split data according to a
    third-party provided label. This label information can be used to encode
    arbitrary domain specific stratifications of the samples as integers.

    For instance the labels could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePLabelOut and LabelShuffleSplit is that
    the former generates splits using all subsets of size ``p`` unique labels,
    whereas LabelShuffleSplit generates a user-determined number of random
    test splits, each with a user-determined fraction of unique labels.

    For example, a less computationally intensive alternative to
    ``LeavePLabelOut(labels, p=10)`` would be
    ``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.

    Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
    not to samples, as in ShuffleSplit.

    .. versionadded:: 0.17

    Parameters
    ----------
    labels : array, [n_samples]
        Labels of samples

    n_iter : int (default 5)
        Number of re-shuffling and splitting iterations.

    test_size : float (default 0.2), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the test split. If
        int, represents the absolute number of test labels. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the labels to include in the train split. If
        int, represents the absolute number of train labels. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """

    def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
                 random_state=None):
        # The parent partitions *label classes*, not samples, so its ``n``
        # is the number of distinct label values.
        classes, label_indices = np.unique(labels, return_inverse=True)
        super(LabelShuffleSplit, self).__init__(
            len(classes),
            n_iter=n_iter,
            test_size=test_size,
            train_size=train_size,
            random_state=random_state)
        self.labels = labels
        self.classes = classes
        self.label_indices = label_indices

    def __repr__(self):
        return '%s(labels=%s, n_iter=%d, test_size=%s, random_state=%s)' % (
            self.__class__.__name__, self.labels, self.n_iter,
            str(self.test_size), self.random_state)

    def __len__(self):
        return self.n_iter

    def _iter_indices(self):
        # The parent yields partitions over label classes; translate each
        # one back into indices of the individual samples.
        parent_splits = super(LabelShuffleSplit, self)._iter_indices()
        for label_train, label_test in parent_splits:
            train = np.flatnonzero(np.in1d(self.label_indices, label_train))
            test = np.flatnonzero(np.in1d(self.label_indices, label_test))
            yield train, test
##############################################################################
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing."""
    # Only index values that are arrays aligned with the samples of X
    # (e.g. sample_weight); anything else is passed through unchanged.
    if _is_arraylike(v) and _num_samples(v) == _num_samples(X):
        if sp.issparse(v):
            v = v.tocsr()  # CSR supports efficient row-wise fancy indexing
        return safe_indexing(v, indices)
    return v
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate cross-validated estimates for each input data point

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    Returns
    -------
    preds : ndarray
        This is the result of calling 'predict'

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> y_pred = cross_val_predict(lasso, X, y)
    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))

    # Clone the estimator so that every fold is fitted independently and
    # the dispatched jobs are picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params)
        for train, test in cv)

    predictions = [pred for pred, _ in prediction_blocks]
    test_indices = np.concatenate([idx for _, idx in prediction_blocks])
    if not _check_is_partition(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')

    # Invert the fold-concatenation order back to the original sample order.
    inverse = np.empty(len(test_indices), dtype=int)
    inverse[test_indices] = np.arange(len(test_indices))

    # Check for sparse predictions
    if sp.issparse(predictions[0]):
        predictions = sp.vstack(predictions, format=predictions[0].format)
    else:
        predictions = np.concatenate(predictions)
    return predictions[inverse]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit estimator and predict values for a given dataset split.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    Returns
    -------
    preds : sequence
        Result of calling 'estimator.predict'

    test : array-like
        This is the value of the test parameter
    """
    # Slice sample-aligned fit parameters (e.g. sample_weight) down to the
    # training subset.
    fit_params = {} if fit_params is None else fit_params
    fit_params = dict([(key, _index_param_value(X, value, train))
                       for key, value in fit_params.items()])

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    # Unsupervised estimators are fitted without a target.
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    return estimator.predict(X_test), test
def _check_is_partition(locs, n):
    """Check whether locs is a reordering of the array np.arange(n)

    Parameters
    ----------
    locs : ndarray
        integer array to test
    n : int
        number of expected elements

    Returns
    -------
    is_partition : bool
        True iff sorted(locs) is range(n)
    """
    if len(locs) != n:
        return False
    # Mark every index that occurs; duplicates leave gaps elsewhere,
    # so full coverage implies a permutation of range(n).
    seen = np.zeros(n, bool)
    seen[locs] = True
    return bool(np.all(seen))
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y))  # doctest: +ELLIPSIS
    [ 0.33150734  0.08022311  0.03531764]

    See Also
    ---------
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    X, y = indexable(X, y)

    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # Clone the estimator so that every fold is fitted independently and
    # the dispatched jobs are picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    fold_results = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                train, test, verbose, None, fit_params)
        for train, test in cv)
    # Each result is [test_score, n_test_samples, scoring_time];
    # only the score is returned to the caller.
    return np.array(fold_results)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    return_train_score : boolean, optional, default: False
        Compute and return score on training set.

    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.

    test_score : float
        Score on test set.

    n_test_samples : int
        Number of test samples.

    scoring_time : float
        Time spent for fitting and scoring in seconds.

    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    # ``msg`` is only bound when verbose > 1; the later uses at
    # verbose > 2 and verbose > 1 are therefore always safe.
    if verbose > 1:
        if parameters is None:
            msg = ''
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    # Adjust length of sample weights: slice any sample-aligned fit
    # parameter (e.g. sample_weight) down to the training subset.
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])

    if parameters is not None:
        estimator.set_params(**parameters)

    # The reported time covers fitting AND scoring.
    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    try:
        # y may be None for unsupervised estimators.
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)

    except Exception as e:
        # A numeric error_score turns a failed fit into a warning plus a
        # sentinel score instead of aborting the whole search.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)"
                             )

    else:
        # Fit succeeded: score on the test split (and optionally train).
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)

    scoring_time = time.time() - start_time

    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))

    # Result layout: [train_score?, test_score, n_test_samples,
    # scoring_time, parameters?] depending on the return_* flags.
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
            and not isinstance(estimator.kernel, GPKernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        # Plain sequence (e.g. a list of strings): index it manually.
        X_subset = [X[index] for index in indices]
    elif getattr(estimator, "_pairwise", False):
        # X is a precomputed square kernel matrix
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        # Rows are the requested samples; columns stay aligned with the
        # training samples when those are given.
        columns = indices if train_indices is None else train_indices
        X_subset = X[np.ix_(indices, columns)]
    else:
        X_subset = safe_indexing(X, indices)

    y_subset = None if y is None else safe_indexing(y, indices)
    return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
    """Build a cross-validation generator from user-friendly input.

    Parameters
    ----------
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if classifier is True and ``y`` is binary or
        multiclass, :class:`StratifiedKFold` is used. In all other cases,
        :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    X : array-like
        The data the cross-val object will be applied on.
    y : array-like
        The target variable for a supervised learning problem.
    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance, whatever
        the input type.
    """
    if cv is None:
        cv = 3
    if isinstance(cv, numbers.Integral):
        n_folds = cv
        if classifier and type_of_target(y) in ['binary', 'multiclass']:
            # Preserve class balance across folds for classification.
            cv = StratifiedKFold(y, n_folds)
        elif classifier:
            cv = KFold(_num_samples(y), n_folds)
        else:
            # Sparse matrices have no len(); fall back to shape[0].
            n_samples = X.shape[0] if sp.issparse(X) else len(X)
            cv = KFold(n_samples, n_folds)
    # Generators and custom iterables are passed through untouched.
    return cv
def permutation_test_score(estimator, X, y, cv=None,
                           n_permutations=100, n_jobs=1, labels=None,
                           random_state=0, verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_permutations : integer, optional
        Number of times to permute ``y``.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        a same label.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.
    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.
    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.
    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value: 1 - p-value.

    Notes
    -----
    This function implements Test 1 in:
        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance.  The Journal of Machine Learning Research (2010)
        vol. 11

    """
    X, y = indexable(X, y)
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    # Reference score on the unpermuted targets.
    score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
    # Each permutation shuffles y (within label groups when `labels` is
    # given) and re-runs the full cross-validation.  The generator below is
    # consumed lazily by Parallel, so _shuffle draws from `random_state` in
    # dispatch order.
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state), cv,
            scorer)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # The +1 in numerator and denominator counts the unpermuted score as one
    # of the permutations, so the p-value can never be exactly zero.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))`` and application to input
    data into a single call for splitting (and optionally subsampling)
    data in a oneliner.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    *arrays : sequence of indexables with same length / shape[0]
        Allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.
    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.
    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    stratify : array-like or None (default is None)
        If not None, data is split in a stratified fashion, using this as
        the labels array.

        .. versionadded:: 0.17
           *stratify* splitting

    Returns
    -------
    splitting : list, length = 2 * len(arrays),
        List containing train-test split of inputs.

        .. versionadded:: 0.16
            If the input is sparse, the output will be a
            ``scipy.sparse.csr_matrix``. Else, output type is the same as the
            input type.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_validation import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]

    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]

    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    # Pull out the supported keyword options; signature uses **options
    # because *arrays is variadic (pre-keyword-only-argument style).
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    if options:
        # Anything left over is an unsupported keyword argument.
        raise TypeError("Invalid parameters passed: %s" % str(options))
    if test_size is None and train_size is None:
        # Documented default: hold out 25% of the samples for testing.
        test_size = 0.25
    arrays = indexable(*arrays)
    if stratify is not None:
        cv = StratifiedShuffleSplit(stratify, test_size=test_size,
                                    train_size=train_size,
                                    random_state=random_state)
    else:
        n_samples = _num_samples(arrays[0])
        cv = ShuffleSplit(n_samples, test_size=test_size,
                          train_size=train_size,
                          random_state=random_state)
    # Only the first (train, test) split of the shuffle splitter is used.
    train, test = next(iter(cv))
    # Interleave per-array subsets: [a_train, a_test, b_train, b_test, ...]
    return list(chain.from_iterable((safe_indexing(a, train),
                                     safe_indexing(a, test)) for a in arrays))


train_test_split.__test__ = False  # to avoid a pb with nosetests
| bsd-3-clause |
pechersky/keras-molecules | download_dataset.py | 2 | 2215 | import os
import argparse
import urllib
import pandas
import tempfile
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed
# Built-in dataset shortcuts for --dataset: each short name maps to the
# remote download URI and the default HDF5 output path under data/.
DEFAULTS = {
    "chembl22": {
        "uri": "ftp://ftp.ebi.ac.uk/pub/databases/chembl/ChEMBLdb/latest/chembl_22_chemreps.txt.gz",
        "outfile": "data/chembl22.h5"
    },
    "zinc12": {
        "uri": "http://zinc.docking.org/db/bysubset/13/13_prop.xls",
        "outfile": "data/zinc12.h5"
    }
}
def get_arguments():
    """Parse command-line options and resolve them to a (uri, outfile) pair.

    Either a known ``--dataset`` shortcut (see DEFAULTS) or a custom
    ``--uri`` plus ``--outfile`` must be supplied.  ``--outfile`` may
    override a shortcut's default output path.

    Returns
    -------
    (uri, outfile) : tuple of str

    Raises
    ------
    SystemExit
        Via ``parser.error`` on an unknown dataset name or missing options.
    """
    parser = argparse.ArgumentParser(description = 'Download ChEMBL entries and convert them to input for preprocessing')
    parser.add_argument('--dataset', type = str, help = "%s ...or specify your own --uri" % ", ".join(DEFAULTS.keys()))
    parser.add_argument('--uri', type = str, help = 'URI to download ChEMBL entries from')
    parser.add_argument('--outfile', type = str, help = 'Output file name')
    args = parser.parse_args()
    if args.dataset and args.dataset in DEFAULTS.keys():
        uri = DEFAULTS[args.dataset]['uri']
        outfile = args.outfile or DEFAULTS[args.dataset]['outfile']
    elif args.dataset is not None:
        # A dataset name was given but is not a known shortcut.
        # BUGFIX: the original tested `args.dataset not in DEFAULTS.keys()`,
        # which also triggered when --dataset was omitted entirely (None is
        # not a key), making a custom --uri unusable.
        parser.error("Dataset %s unknown. Valid choices are: %s" % (args.dataset, ", ".join(DEFAULTS.keys())))
    else:
        uri = args.uri
        outfile = args.outfile
    # parser.error raises SystemExit itself, so no explicit sys.exit is
    # needed (the original called sys.exit without importing sys).
    if uri is None:
        parser.error("You must choose either a known --dataset or provide a --uri and --outfile.")
    if outfile is None:
        parser.error("You must provide an --outfile if using a custom --uri.")
    return (uri, outfile)
def main():
    # Resolve the download source and destination from the command line.
    uri, outfile = get_arguments()
    # Download into a NamedTemporaryFile; it is removed automatically when
    # `fd` is closed / garbage-collected.
    fd = tempfile.NamedTemporaryFile()
    progress = ProgressBar(widgets=[Percentage(), ' ', Bar(), ' ', ETA(), ' ', FileTransferSpeed()])

    def update(count, blockSize, totalSize):
        # urlretrieve reporthook: initialise the bar on the first call, then
        # advance it (min() clamps the last partial block to totalSize).
        if progress.maxval is None:
            progress.maxval = totalSize
            progress.start()
        progress.update(min(count * blockSize, totalSize))

    # NOTE(review): `urllib.urlretrieve` is Python 2 only; under Python 3
    # this would be `urllib.request.urlretrieve`.
    urllib.urlretrieve(uri, fd.name, reporthook = update)
    # The downloaded payload is assumed to be tab-delimited text; convert it
    # to an HDF5 table for downstream preprocessing.
    df = pandas.read_csv(fd.name, delimiter = '\t')
    df.to_hdf(outfile, 'table', format = 'table', data_columns = True)

if __name__ == '__main__':
    main()
| mit |
jkthompson/nupic | examples/opf/tools/sp_plotter.py | 8 | 15763 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import copy
import csv
import numpy as np
from nupic.research import FDRCSpatial2
from nupic.bindings.math import GetNTAReal
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
realDType = GetNTAReal()
############################################################################
def generatePlot(outputs, origData):
  """ Generates a table where each cell represent a frequency of pairs
  as described below.
  x coordinate is the % difference between input records (origData list),
  y coordinate is the % difference between corresponding output records.
  """
  PLOT_PRECISION = 100
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
  outputSize = len(outputs)

  # Compare every unordered pair of records.  Hamming-style distance: count
  # positions that differ, halved (each flipped bit appears in both vectors).
  # NOTE: `/2` is Python 2 integer division when the operands are ints.
  for i in range(0,outputSize):
    for j in range(i+1,outputSize):
      in1 = outputs[i]
      in2 = outputs[j]
      dist = (abs(in1-in2) > 0.1)
      intDist = int(dist.sum()/2+0.1)
      orig1 = origData[i]
      orig2 = origData[j]
      origDist = (abs(orig1-orig2) > 0.1)
      intOrigDist = int(origDist.sum()/2+0.1)
      # Flag suspicious pairs: near-identical SP outputs for clearly
      # different inputs (Python 2 print statements).
      if intDist < 2 and intOrigDist > 10:
        print 'Elements %d,%d has very small SP distance: %d' % (i, j, intDist)
        print 'Input elements distance is %d' % intOrigDist
      # Normalize distances to the plot grid: 40 = SP active columns,
      # 42 = bits set per input record.
      x = int(PLOT_PRECISION*intDist/40.0)
      y = int(PLOT_PRECISION*intOrigDist/42.0)
      # First hit of a cell starts at 3 (for visibility); later hits
      # increment, saturating at 10.
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

  # Add some elements for the scale drawing
  distribMatrix[4, 50] = 3
  distribMatrix[4, 52] = 4
  distribMatrix[4, 54] = 5
  distribMatrix[4, 56] = 6
  distribMatrix[4, 58] = 7
  distribMatrix[4, 60] = 8
  distribMatrix[4, 62] = 9
  distribMatrix[4, 64] = 10

  return distribMatrix
############################################################################
def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
  """ Generates a set of input record

  Params:
          numRecords - how many records to generate
          elemSize - the size of each record (num 0s or 1s)
          numSet - how many 1s in each record

  Returns: a list of inputs
  """
  inputs = []

  for _ in xrange(numRecords):
    input = np.zeros(elemSize, dtype=realDType)
    # NOTE: np.random.random_integers is inclusive of both bounds and is a
    # legacy NumPy API (deprecated in favour of randint).
    for _ in range(0,numSet):
      ind = np.random.random_integers(0, elemSize-1, 1)[0]
      input[ind] = 1
    # Random draws can collide on the same index, leaving fewer than
    # numSet bits set; keep drawing until exactly numSet bits are on.
    while abs(input.sum() - numSet) > 0.1:
      ind = np.random.random_integers(0, elemSize-1, 1)[0]
      input[ind] = 1

    inputs.append(input)

  return inputs
############################################################################
def appendInputWithSimilarValues(inputs):
  """ Creates an 'one-off' record for each record in the inputs. Appends new
  records to the same inputs list.
  """
  # Only iterate the original records; appended records are not revisited.
  numInputs = len(inputs)
  for i in xrange(numInputs):
    input = inputs[i]
    for j in xrange(len(input)-1):
      # Shift the first 1 followed by a 0 one position to the right,
      # producing a record at distance 1 from the original.
      if input[j] == 1 and input[j+1] == 0:
        newInput = copy.deepcopy(input)
        newInput[j] = 0
        newInput[j+1] = 1
        inputs.append(newInput)
        break
############################################################################
def appendInputWithNSimilarValues(inputs, numNear = 10):
  """ Creates a neighboring record for each record in the inputs and adds
  new records at the end of the inputs list
  """
  numInputs = len(inputs)
  skipOne = False
  for i in xrange(numInputs):
    input = inputs[i]
    numChanged = 0
    newInput = copy.deepcopy(input)
    # Each appended record shifts one additional "10" pair to "01",
    # producing neighbors of increasing distance from the original.
    # NOTE(review): `skipOne` is not reset between outer iterations, so a
    # pending skip can leak from one input record into the next — confirm
    # whether that is intentional.
    for j in xrange(len(input)-1):
      if skipOne:
        skipOne = False
        continue
      if input[j] == 1 and input[j+1] == 0:
        newInput[j] = 0
        newInput[j+1] = 1
        inputs.append(newInput)
        newInput = copy.deepcopy(newInput)
        #print input
        #print newInput
        numChanged += 1
        skipOne = True
        if numChanged == numNear:
          break
############################################################################
def modifyBits(inputVal, maxChanges):
  """ Modifies up to maxChanges number of bits in the inputVal
  """
  changes = np.random.random_integers(0, maxChanges, 1)[0]
  if changes == 0:
    return inputVal

  inputWidth = len(inputVal)
  # Randomly pick which of the (assumed 42) set bits to move; legacy
  # inclusive-bounds API.  NOTE(review): the upper bound 41 is hard-coded to
  # the 42-bit encoding used elsewhere in this file — confirm for other uses.
  whatToChange = np.random.random_integers(0, 41, changes)

  # Walk the input; runningIndex counts set bits seen so far and a selected
  # set bit is shifted one position to the left (when the left neighbor is 0).
  runningIndex = -1
  numModsDone = 0
  # NOTE(review): numModsDone is never incremented, so this early-exit guard
  # is dead and the loop always scans the full input — looks like a bug.
  for i in xrange(inputWidth):
    if numModsDone >= changes:
      break
    if inputVal[i] == 1:
      runningIndex += 1
      if runningIndex in whatToChange:
        if i != 0 and inputVal[i-1] == 0:
          inputVal[i-1] = 1
          inputVal[i] = 0

  # Mutates and returns the same array (no copy is made).
  return inputVal
############################################################################
def getRandomWithMods(inputSpace, maxChanges):
  """ Returns a random selection from the inputSpace with randomly modified
  up to maxChanges number of bits.
  """
  size = len(inputSpace)
  ind = np.random.random_integers(0, size-1, 1)[0]

  # Deep-copy so modifyBits below cannot mutate the stored input record.
  value = copy.deepcopy(inputSpace[ind])

  if maxChanges == 0:
    return value

  return modifyBits(value, maxChanges)
############################################################################
def testSP():
  """ Run a SP test
  """
  # Encoding parameters: 400-bit records with 42 bits set.
  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 2

  wantPlot = True

  poolPct = 0.5
  itr = 1
  doLearn = True

  # The while loop runs exactly once (numRecords goes 2 -> 3); kept as a
  # loop so the range can be widened for sweeps.
  while numRecords < 3:

    # Setup a SP
    sp = FDRCSpatial2.FDRCSpatial2(
           coincidencesShape=(2048, 1),
           inputShape = (1, elemSize),
           inputBorder = elemSize/2-1,
           coincInputRadius = elemSize/2,
           numActivePerInhArea = 40,
           spVerbosity = 0,
           stimulusThreshold = 0,
           seed = 1,
           coincInputPoolPct = poolPct,
           globalInhibition = True
         )

    # Generate inputs using rand()
    inputs = generateRandomInput(numRecords, elemSize, numSet)
    if addNear:
      # Append similar entries (distance of 1)
      appendInputWithNSimilarValues(inputs, 42)
    inputSize = len(inputs)
    print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)

    # Run a number of iterations, with learning on or off,
    # retrieve results from the last iteration only
    outputs = np.zeros((inputSize,2048))

    numIter = 1
    if doLearn:
      numIter = itr

    for iter in xrange(numIter):
      for i in xrange(inputSize):
        time.sleep(0.001)
        if iter == numIter - 1:
          # Only the final pass is recorded for plotting.
          outputs[i] = sp.compute(inputs[i], learn=doLearn, infer=False)
          #print outputs[i].sum(), outputs[i]
        else:
          sp.compute(inputs[i], learn=doLearn, infer=False)

    # Build a plot from the generated input and output and display it
    distribMatrix = generatePlot(outputs, inputs)

    # If we don't want a plot, just continue
    if wantPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'
      if doLearn:
        title += ', leaning ON'
      else:
        title += ', learning OFF'

      title += ', inputs = %d' % len(inputs)
      title += ', iterations = %d' % numIter
      title += ', poolPct =%f' % poolPct
      plt.suptitle(title, fontsize=12)
      plt.show()
      #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
      #plt.clf()

    numRecords += 1

  return
############################################################################
def testSPNew():
  """ New version of the test"""

  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 1000

  wantPlot = False

  poolPct = 0.5
  itr = 5

  # pattern[0] = length of a learning phase, pattern[1] = length of a
  # testing phase; the loop alternates between the two.
  pattern = [60, 1000]
  doLearn = True
  start = 1
  learnIter = 0
  noLearnIter = 0
  numLearns = 0
  numTests = 0
  numIter = 1
  numGroups = 1000

  # NOTE(review): PLOT_PRECISION is a float here; np.zeros with float shape
  # entries relies on lenient legacy NumPy behavior.
  PLOT_PRECISION = 100.0
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))

  inputs = generateRandomInput(numGroups, elemSize, numSet)

  # Setup a SP
  sp = FDRCSpatial2.FDRCSpatial2(
         coincidencesShape=(2048, 1),
         inputShape = (1, elemSize),
         inputBorder = elemSize/2-1,
         coincInputRadius = elemSize/2,
         numActivePerInhArea = 40,
         spVerbosity = 0,
         stimulusThreshold = 0,
         synPermConnected = 0.12,
         seed = 1,
         coincInputPoolPct = poolPct,
         globalInhibition = True
       )

  cleanPlot = False

  for i in xrange(numRecords):
    # Pairs: on even iterations two independent random picks; on odd ones
    # a pick and a heavily modified copy of it (up to 21 bit changes).
    input1 = getRandomWithMods(inputs, 4)
    if i % 2 == 0:
      input2 = getRandomWithMods(inputs, 4)
    else:
      input2 = input1.copy()
      input2 = modifyBits(input2, 21)

    inDist = (abs(input1-input2) > 0.1)
    intInDist = int(inDist.sum()/2+0.1)
    #print intInDist

    # Two-state machine: state 0 = learning for pattern[0] steps,
    # state 1 = testing for pattern[1] steps, then flip.
    if start == 0:
      doLearn = True
      learnIter += 1
      if learnIter == pattern[start]:
        numLearns += 1
        start = 1
        noLearnIter = 0
    elif start == 1:
      doLearn = False
      noLearnIter += 1
      if noLearnIter == pattern[start]:
        numTests += 1
        start = 0
        learnIter = 0
        cleanPlot = True

    output1 = sp.compute(input1, learn=doLearn, infer=False).copy()
    output2 = sp.compute(input2, learn=doLearn, infer=False).copy()
    time.sleep(0.001)

    outDist = (abs(output1-output2) > 0.1)
    intOutDist = int(outDist.sum()/2+0.1)

    if not doLearn and intOutDist < 2 and intInDist > 10:
      # The triple-quoted string below is disabled debug code (a bare
      # string expression is a no-op).
      """
      sp.spVerbosity = 10
      sp.compute(input1, learn=doLearn, infer=False)
      sp.compute(input2, learn=doLearn, infer=False)
      sp.spVerbosity = 0

      print 'Elements has very small SP distance: %d' % intOutDist
      print output1.nonzero()
      print output2.nonzero()
      print sp._firingBoostFactors[output1.nonzero()[0]]
      print sp._synPermBoostFactors[output1.nonzero()[0]]
      print 'Input elements distance is %d' % intInDist
      print input1.nonzero()
      print input2.nonzero()
      sys.stdin.readline()
      """

    # Only accumulate the distribution during testing phases.
    if not doLearn:
      x = int(PLOT_PRECISION*intOutDist/40.0)
      y = int(PLOT_PRECISION*intInDist/42.0)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

    #print i

    # If we don't want a plot, just continue
    if wantPlot and cleanPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'

      #if doLearn:
      #  title += ', leaning ON'
      #else:
      #  title += ', learning OFF'

      title += ', learn sets = %d' % numLearns
      title += ', test sets = %d' % numTests
      title += ', iter = %d' % numIter
      title += ', groups = %d' % numGroups
      title += ', Pct =%f' % poolPct

      plt.suptitle(title, fontsize=12)
      #plt.show()
      # NOTE(review): '~' is not expanded by savefig/open; this path only
      # works if a literal '~' directory exists.
      plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))

      plt.clf()
      distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
      cleanPlot = False
############################################################################
def testSPFile():
  """ Run test on the data file - the file has records previously encoded.
  """

  spSize = 2048
  spSet = 40

  poolPct = 0.5

  # pattern[0] = learning presentations per iteration,
  # pattern[1] = random test pairs per iteration.
  pattern = [50, 1000]
  doLearn = True

  PLOT_PRECISION = 100.0
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))

  inputs = []

  #file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
  #elemSize = 400
  #numSet = 42

  #file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
  #elemSize = 499
  #numSet = 7

  # NOTE(review): open() does not expand '~'; this relies on a literal
  # '~' directory or needs os.path.expanduser.
  outdir = '~/Desktop/ExperimentResults/Basil100x21'
  inputFile = outdir+'.csv'

  file = open(inputFile, 'rb')

  elemSize = 100
  numSet = 21

  reader = csv.reader(file)

  # Keep only rows whose encoding has exactly numSet non-zero bits.
  for row in reader:
    input = np.array(map(float, row), dtype=realDType)
    if len(input.nonzero()[0]) != numSet:
      continue
    inputs.append(input.copy())

  file.close()

  # Setup a SP
  sp = FDRCSpatial2.FDRCSpatial2(
         coincidencesShape=(spSize, 1),
         inputShape = (1, elemSize),
         inputBorder = (elemSize-1)/2,
         coincInputRadius = elemSize/2,
         numActivePerInhArea = spSet,
         spVerbosity = 0,
         stimulusThreshold = 0,
         synPermConnected = 0.10,
         seed = 1,
         coincInputPoolPct = poolPct,
         globalInhibition = True
       )

  cleanPlot = False

  doLearn = False

  print 'Finished reading file, inputs/outputs to process =', len(inputs)

  size = len(inputs)

  for iter in xrange(100):
    print 'Iteration', iter

    # Learn
    if iter != 0:
      for learnRecs in xrange(pattern[0]):
        ind = np.random.random_integers(0, size-1, 1)[0]
        sp.compute(inputs[ind], learn=True, infer=False)

    # Test
    for _ in xrange(pattern[1]):
      rand1 = np.random.random_integers(0, size-1, 1)[0]
      rand2 = np.random.random_integers(0, size-1, 1)[0]

      output1 = sp.compute(inputs[rand1], learn=False, infer=True).copy()
      output2 = sp.compute(inputs[rand2], learn=False, infer=True).copy()

      outDist = (abs(output1-output2) > 0.1)
      intOutDist = int(outDist.sum()/2+0.1)

      inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
      intInDist = int(inDist.sum()/2+0.1)

      # When the same record is drawn twice, the distance is 0, not
      # numSet/spSet, hence the report below.
      if intInDist != numSet or intOutDist != spSet:
        print rand1, rand2, '-', intInDist, intOutDist

      x = int(PLOT_PRECISION*intOutDist/spSet)
      y = int(PLOT_PRECISION*intInDist/numSet)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

    # Plotting is unconditionally enabled; one image per iteration.
    if True:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
      plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))

      title = 'SP distribution'
      title += ', iter = %d' % iter
      title += ', Pct =%f' % poolPct

      plt.suptitle(title, fontsize=12)

      #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
      plt.savefig(os.path.join(outdir, '%s' % iter))

      plt.clf()
      distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
############################################################################
if __name__ == '__main__':
  # Fixed seed so the random inputs/selections are reproducible.
  np.random.seed(83)
  #testSP()
  #testSPNew()
  testSPFile()
| gpl-3.0 |
satishgoda/bokeh | bokeh/sampledata/gapminder.py | 41 | 2655 | from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides a pandas DataFrame instance of four
of the datasets from gapminder.org.
These are read in from csvs that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
# Directory holding the downloaded sample-data CSVs.
data_dir = _data_dir()

# One module-level pandas DataFrame attribute is created per name below.
datasets = [
    'fertility',
    'life_expectancy',
    'population',
    'regions',
]

for dataset in datasets:
    filename = join(data_dir, 'gapminder_%s.csv' % dataset)
    try:
        # Expose e.g. `bokeh.sampledata.gapminder.fertility` as a DataFrame
        # indexed by country name.
        setattr(
            sys.modules[__name__],
            dataset,
            pd.read_csv(filename, index_col='Country')
        )
    except (IOError, OSError):
        # Missing file: the user has not fetched the sample data yet.
        raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)

__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1962 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
| bsd-3-clause |
wibeasley/PyCap | test/test_project.py | 3 | 11950 | #! /usr/bin/env python
import unittest
from redcap import Project, RedcapError
skip_pd = False
try:
import pandas as pd
except ImportError:
skip_pd = True
class ProjectTests(unittest.TestCase):
    """Integration tests for redcap.Project against live REDCap projects."""

    url = 'https://redcap.vanderbilt.edu/api/'
    bad_url = 'https://redcap.vanderbilt.edu/api'
    reg_token = '8E66DB6844D58E990075AFB51658A002'
    # NOTE(review): these Project instances are built at class-definition
    # time, so merely importing this module performs live network requests.
    long_proj = Project(url, '1387872621BBF1C17CC47FD8AE25FF54')
    reg_proj = Project(url, reg_token)
    ssl_proj = Project(url, reg_token, verify_ssl=False)
    survey_proj = Project(url, '37CAB1ABC2FEB3BB9D821DF13BA38A7B')

    def setUp(self):
        # No per-test setup: fixtures are shared class attributes above.
        pass

    def tearDown(self):
        pass

    def test_good_init(self):
        """Ensure basic instantiation """
        self.assertIsInstance(self.long_proj, Project)
        self.assertIsInstance(self.reg_proj, Project)
        self.assertIsInstance(self.ssl_proj, Project)

    def test_normal_attrs(self):
        """Ensure projects are created with all normal attrs"""
        for attr in ('metadata', 'field_names', 'field_labels', 'forms',
                     'events', 'arm_names', 'arm_nums', 'def_field'):
            self.assertTrue(hasattr(self.reg_proj, attr))

    def test_long_attrs(self):
        "proj.events/arm_names/arm_nums should not be empty in long projects"
        self.assertIsNotNone(self.long_proj.events)
        self.assertIsNotNone(self.long_proj.arm_names)
        self.assertIsNotNone(self.long_proj.arm_nums)

    def test_is_longitudinal(self):
        "Test the is_longitudinal method"
        self.assertFalse(self.reg_proj.is_longitudinal())
        self.assertTrue(self.long_proj.is_longitudinal())

    def test_regular_attrs(self):
        """proj.events/arm_names/arm_nums should be empty tuples"""
        for attr in 'events', 'arm_names', 'arm_nums':
            attr_obj = getattr(self.reg_proj, attr)
            self.assertIsNotNone(attr_obj)
            self.assertEqual(len(attr_obj), 0)
    def test_json_export(self):
        """ Make sure we get a list of dicts"""
        data = self.reg_proj.export_records()
        self.assertIsInstance(data, list)
        for record in data:
            self.assertIsInstance(record, dict)

    def test_long_export(self):
        """After determining a unique event name, make sure we get a
        list of dicts"""
        unique_event = self.long_proj.events[0]['unique_event_name']
        data = self.long_proj.export_records(events=[unique_event])
        self.assertIsInstance(data, list)
        for record in data:
            self.assertIsInstance(record, dict)

    def test_import_records(self):
        "Test record import"
        # Round-trip: re-import what we just exported.  A successful import
        # response carries a 'count' key and no 'error' key.
        data = self.reg_proj.export_records()
        response = self.reg_proj.import_records(data)
        self.assertIn('count', response)
        self.assertNotIn('error', response)

    def test_import_exception(self):
        "Test record import throws RedcapError for bad import"
        data = self.reg_proj.export_records()
        # An unknown field name must make the server reject the import.
        data[0]['non_existent_key'] = 'foo'
        with self.assertRaises(RedcapError) as cm:
            self.reg_proj.import_records(data)
        exc = cm.exception
        self.assertIn('error', exc.args[0])

    def is_good_csv(self, csv_string):
        "Helper to test csv strings"
        # NOTE(review): `basestring` exists in Python 2 only.
        return isinstance(csv_string, basestring)

    def test_csv_export(self):
        """Test valid csv export """
        csv = self.reg_proj.export_records(format='csv')
        self.assertTrue(self.is_good_csv(csv))

    def test_metadata_export(self):
        """Test valid metadata csv export"""
        csv = self.reg_proj.export_metadata(format='csv')
        self.assertTrue(self.is_good_csv(csv))
    def test_bad_creds(self):
        "Test that exceptions are raised with bad URL or tokens"
        with self.assertRaises(RedcapError):
            Project(self.bad_url, self.reg_token)
        with self.assertRaises(RedcapError):
            Project(self.url, '1')

    def test_fem_export(self):
        """ Test fem export in json format gives list of dicts"""
        # fem = form-event mapping; only meaningful for longitudinal projects.
        fem = self.long_proj.export_fem(format='json')
        self.assertIsInstance(fem, list)
        for arm in fem:
            self.assertIsInstance(arm, dict)

    def test_file_export(self):
        """Test file export and proper content-type parsing"""
        record, field = '1', 'file'
        # Upload first to make sure file is there
        self.import_file()
        # Now export it
        content, headers = self.reg_proj.export_file(record, field)
        self.assertIsInstance(content, basestring)
        # We should at least get the filename in the headers
        for key in ['name']:
            self.assertIn(key, headers)
        # needs to raise ValueError for exporting non-file fields
        with self.assertRaises(ValueError):
            self.reg_proj.export_file(record=record, field='dob')
        # Delete and make sure we get an RedcapError with next export
        self.reg_proj.delete_file(record, field)
        with self.assertRaises(RedcapError):
            self.reg_proj.export_file(record, field)

    def import_file(self):
        # Helper: upload the data.txt fixture into record 1's 'file' field.
        upload_fname = self.upload_fname()
        with open(upload_fname, 'r') as fobj:
            response = self.reg_proj.import_file('1', 'file', upload_fname, fobj)
        return response

    def upload_fname(self):
        # Helper: absolute path of the data.txt fixture next to this module.
        import os
        this_dir, this_fname = os.path.split(__file__)
        return os.path.join(this_dir, 'data.txt')
def test_file_import(self):
"Test file import"
# Make sure a well-formed request doesn't throw RedcapError
try:
response = self.import_file()
except RedcapError:
self.fail("Shouldn't throw RedcapError for successful imports")
self.assertTrue('error' not in response)
# Test importing a file to a non-file field raises a ValueError
fname = self.upload_fname()
with open(fname, 'r') as fobj:
with self.assertRaises(ValueError):
response = self.reg_proj.import_file('1', 'first_name',
fname, fobj)
    def test_file_delete(self):
        "Test file deletion"
        # upload a file so there is something to delete
        fname = self.upload_fname()
        with open(fname, 'r') as fobj:
            self.reg_proj.import_file('1', 'file', fname, fobj)
        # make sure deleting doesn't raise
        try:
            self.reg_proj.delete_file('1', 'file')
        except RedcapError:
            self.fail("Shouldn't throw RedcapError for successful deletes")
    def test_user_export(self):
        "Test user export"
        users = self.reg_proj.export_users()
        # A project must have at least one user
        self.assertTrue(len(users) > 0)
        # Keys the REDCap user export is expected to carry for each user.
        req_keys = ['firstname', 'lastname', 'email', 'username',
                    'expiration', 'data_access_group', 'data_export',
                    'forms']
        for user in users:
            for key in req_keys:
                self.assertIn(key, user)
    def test_verify_ssl(self):
        """Test argument making for SSL verification"""
        # Test we won't verify SSL cert for non-verified project
        post_kwargs = self.ssl_proj._kwargs()
        self.assertIn('verify', post_kwargs)
        self.assertFalse(post_kwargs['verify'])
        # Test we do verify SSL cert in normal project
        post_kwargs = self.reg_proj._kwargs()
        self.assertIn('verify', post_kwargs)
        self.assertTrue(post_kwargs['verify'])
    def test_export_data_access_groups(self):
        """Test we get 'redcap_data_access_group' in exported data"""
        records = self.reg_proj.export_records(export_data_access_groups=True)
        for record in records:
            self.assertIn('redcap_data_access_group', record)
        # When not passed, that key shouldn't be there
        records = self.reg_proj.export_records()
        for record in records:
            self.assertNotIn('redcap_data_access_group', record)
    def test_export_survey_fields(self):
        """Test that we get the appropriate survey keys in the exported
        data.

        Note that the 'demographics' form has been setup as the survey
        in the `survey_proj` project. The _timestamp field will vary for
        users as their survey form will be named differently"""
        records = self.survey_proj.export_records(export_survey_fields=True)
        for record in records:
            self.assertIn('redcap_survey_identifier', record)
            self.assertIn('demographics_timestamp', record)
        # The regular project doesn't have a survey setup. Users should
        # be able to pass this argument as True but it winds up a no-op.
        records = self.reg_proj.export_records(export_survey_fields=True)
        for record in records:
            self.assertNotIn('redcap_survey_identifier', record)
            self.assertNotIn('demographics_timestamp', record)
    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_metadata_to_df(self):
        """Test metadata export --> DataFrame"""
        df = self.reg_proj.export_metadata(format='df')
        self.assertIsInstance(df, pd.DataFrame)
    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_export_to_df(self):
        """Test export --> DataFrame"""
        df = self.reg_proj.export_records(format='df')
        self.assertIsInstance(df, pd.DataFrame)
        # Test it's a normal index
        self.assertTrue(hasattr(df.index, 'name'))
        # Test for a MultiIndex on longitudinal df
        long_df = self.long_proj.export_records(format='df', event_name='raw')
        self.assertTrue(hasattr(long_df.index, 'names'))
    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_export_df_kwargs(self):
        """Test passing kwargs to export DataFrame construction"""
        # df_kwargs are forwarded to the pandas CSV reader.
        df = self.reg_proj.export_records(format='df',
                                          df_kwargs={'index_col': 'first_name'})
        self.assertEqual(df.index.name, 'first_name')
        self.assertTrue('study_id' in df)
    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_metadata_df_kwargs(self):
        """Test passing kwargs to metadata DataFrame construction"""
        df = self.reg_proj.export_metadata(format='df',
                                           df_kwargs={'index_col': 'field_label'})
        self.assertEqual(df.index.name, 'field_label')
        self.assertTrue('field_name' in df)
    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_import_dataframe(self):
        """Test importing a pandas.DataFrame"""
        df = self.reg_proj.export_records(format='df')
        # grrr coerce implicitly converted floats to str(int())
        for col in ['matrix1', 'matrix2', 'matrix3', 'sex']:
            df[col] = map(lambda x: str(int(x)) if pd.notnull(x) else '', df[col])
        response = self.reg_proj.import_records(df)
        self.assertIn('count', response)
        self.assertNotIn('error', response)
        # Same round trip for the longitudinal project.
        long_df = self.long_proj.export_records(event_name='raw', format='df')
        response = self.long_proj.import_records(long_df)
        self.assertIn('count', response)
        self.assertNotIn('error', response)
    def test_date_formatting(self):
        """Test date_format parameter"""

        def import_factory(date_string):
            # Minimal one-record payload carrying the given dob value.
            return [{'study_id': '1',
                     'dob': date_string}]

        # Default YMD with dashes
        import_ymd = import_factory('2000-01-01')
        response = self.reg_proj.import_records(import_ymd)
        self.assertEqual(response['count'], 1)
        # DMY with /
        import_dmy = import_factory('31/01/2000')
        response = self.reg_proj.import_records(import_dmy, date_format='DMY')
        self.assertEqual(response['count'], 1)
        # MDY with /
        import_mdy = import_factory('12/31/2000')
        response = self.reg_proj.import_records(import_mdy, date_format='MDY')
        self.assertEqual(response['count'], 1)
| mit |
ChinaQuants/bokeh | examples/plotting/file/elements.py | 43 | 1485 | import pandas as pd
# Scatter plot of element density vs. atomic weight, colored by melting point.
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata import periodic_table

# Restrict the periodic table to elements up to lead (Z = 82) that have a
# recorded melting point.
els = periodic_table.elements
els = els[els["atomic number"] <= 82]
els = els[~pd.isnull(els["melting point"])]

# Atomic masses of unstable elements are quoted as "[value]"; strip the
# brackets and convert everything to float.
els["atomic mass"] = [float(m.strip("[]")) for m in els["atomic mass"]]

# Diverging red-to-blue ramp, reversed so cold (low melting point) maps red.
color_ramp = list(reversed([
    "#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))

melt = els["melting point"]
lo = min(melt)
hi = max(melt)
# Map each melting point onto one of the 11 ramp entries (indices 0-10).
colors = [color_ramp[int(10 * (t - lo) / (hi - lo))] for t in melt]

output_file("elements.html", title="elements.py example")

TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"

plot = figure(tools=TOOLS, toolbar_location="left", logo="grey", plot_width=1200)
plot.title = "Density vs Atomic Weight of Elements (colored by melting point)"
plot.background_fill = "#cccccc"

plot.circle(els["atomic mass"], els["density"], size=12,
            color=colors, line_color="black", fill_alpha=0.8)

plot.text(els["atomic mass"], els["density"]+0.3,
          text=els["symbol"], text_color="#333333",
          text_align="center", text_font_size="10pt")

plot.xaxis.axis_label = "atomic weight (amu)"
plot.yaxis.axis_label = "density (g/cm^3)"
plot.grid.grid_line_color = "white"

show(plot)
| bsd-3-clause |
pianomania/scikit-learn | sklearn/cluster/mean_shift_.py | 42 | 15514 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
                       n_jobs=1):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    Note that this function takes time at least quadratic in n_samples.
    For large datasets, it's wise to set the n_samples parameter to a
    small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.

    quantile : float, default 0.3
        Should be between [0, 1];
        0.5 means that the median of all pairwise distances is used.

    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    rng = check_random_state(random_state)
    if n_samples is not None:
        # Work on a random subsample to keep the quadratic cost manageable.
        subset = rng.permutation(X.shape[0])[:n_samples]
        X = X[subset]

    k = int(X.shape[0] * quantile)
    neighbors = NearestNeighbors(n_neighbors=k, n_jobs=n_jobs)
    neighbors.fit(X)

    # Accumulate, for every point, the distance to its k-th nearest
    # neighbor; query in batches of 500 points to bound memory use.
    total = 0.
    for batch in gen_batches(len(X), 500):
        dist, _ = neighbors.kneighbors(X[batch, :], return_distance=True)
        total += np.max(dist, axis=1).sum()

    return total / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.

    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.

    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

        .. versionadded:: 0.17
           Parallel Execution using *n_jobs*.

    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.

    labels : array, shape=[n_samples]
        Cluster labels for each point.

    Notes
    -----
    See examples/cluster/plot_mean_shift.py for an example.
    """
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)

    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary; seeds whose neighborhood emptied out
    # return None and are skipped
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]

    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
                         or increase the bandwidth."
                         % bandwidth)

    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # Builtin bool/int replace the np.bool/np.int aliases, which were
    # deprecated in NumPy 1.20 and removed in 1.24; the dtypes are the same.
    unique = np.ones(len(sorted_centers), dtype=bool)
    nbrs = NearestNeighbors(radius=bandwidth,
                            n_jobs=n_jobs).fit(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]

    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Finds seeds by first binning data onto a grid whose lines are
    spaced bin_size apart, and then choosing those bins with at least
    min_bin_freq points.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Bin points
    bin_sizes = defaultdict(int)
    for point in X:
        binned_point = np.round(point / bin_size)
        bin_sizes[tuple(binned_point)] += 1

    # Select only those bins as seeds which have enough members.
    # dict.items() behaves identically on Python 2 and 3 here, so the
    # six.iteritems indirection is unnecessary.
    bin_seeds = np.array([point for point, freq in bin_sizes.items() if
                          freq >= min_bin_freq], dtype=np.float32)
    if len(bin_seeds) == len(X):
        # Binning did not reduce the number of points; seeding is useless.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    bin_seeds = bin_seeds * bin_size
    return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.

    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.

    Seeding is performed using a binning technique for scalability.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).

    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.

    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.

    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.

    labels_ :
        Labels of each point.

    Notes
    -----
    Scalability:

    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).

    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.

    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.

    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Store parameters only; all work happens in fit() per the
        # scikit-learn estimator convention.
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Perform clustering.

        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        """
        # y is ignored; it exists only for pipeline API compatibility.
        # Delegate the actual work to the module-level mean_shift function.
        X = check_array(X)
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        # Nearest fitted center wins; fit() must have been called first.
        check_is_fitted(self, "cluster_centers_")
        return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
eramirem/astroML | book_figures/chapter6/fig_kmeans_metallicity.py | 3 | 3200 | """
EM example: K-means
-------------------
Figure 6.13
The K-means analysis of the stellar metallicity data used in figure 6.6. Note
how the background distribution "pulls" the cluster centers away from the locus
where one would place them by eye. This is why more sophisticated models like
GMM are often better in practice.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import norm
from sklearn.cluster import KMeans
from sklearn import preprocessing
from astroML.datasets import fetch_sdss_sspp
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Get data
data = fetch_sdss_sspp(cleaned=True)
X = np.vstack([data['FeH'], data['alphFe']]).T

# truncate dataset for speed
X = X[::5]

#------------------------------------------------------------
# Compute a 2D histogram of the input
H, FeH_bins, alphFe_bins = np.histogram2d(data['FeH'], data['alphFe'], 50)

#------------------------------------------------------------
# Compute the KMeans clustering
n_clusters = 4

# Standardize features before clustering so both axes weigh equally.
scaler = preprocessing.StandardScaler()
clf = KMeans(n_clusters)
clf.fit(scaler.fit_transform(X))

#------------------------------------------------------------
# Visualize the results
fig = plt.figure(figsize=(5, 5))
# Single axes for the whole figure. (The original code also called
# fig.add_subplot() here and immediately overwrote the result with
# plt.axes(); that dead call has been removed.)
ax = plt.axes()

# plot density
ax.imshow(H.T, origin='lower', interpolation='nearest', aspect='auto',
          extent=[FeH_bins[0], FeH_bins[-1],
                  alphFe_bins[0], alphFe_bins[-1]],
          cmap=plt.cm.binary)

# plot cluster centers (transformed back to the original feature space)
cluster_centers = scaler.inverse_transform(clf.cluster_centers_)
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1],
           s=40, c='w', edgecolors='k')

# plot cluster boundaries by predicting labels on a 50x50 grid of
# histogram bin centers and contouring each cluster's indicator mask
FeH_centers = 0.5 * (FeH_bins[1:] + FeH_bins[:-1])
alphFe_centers = 0.5 * (alphFe_bins[1:] + alphFe_bins[:-1])

Xgrid = np.meshgrid(FeH_centers, alphFe_centers)
Xgrid = np.array(Xgrid).reshape((2, 50 * 50)).T

H = clf.predict(scaler.transform(Xgrid)).reshape((50, 50))

for i in range(n_clusters):
    Hcp = H.copy()
    flag = (Hcp == i)
    Hcp[flag] = 1
    Hcp[~flag] = 0

    ax.contour(FeH_centers, alphFe_centers, Hcp, [-0.5, 0.5],
               linewidths=1, colors='k')

ax.xaxis.set_major_locator(plt.MultipleLocator(0.3))
ax.set_xlim(-1.101, 0.101)
ax.set_ylim(alphFe_bins[0], alphFe_bins[-1])

ax.set_xlabel(r'$\rm [Fe/H]$')
ax.set_ylabel(r'$\rm [\alpha/Fe]$')

plt.show()
| bsd-2-clause |
backtou/longlab | gr-digital/examples/example_fll.py | 17 | 4821 | #!/usr/bin/env python
from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
# sys is needed for the exit(1) calls in the fallback branches below;
# the original code used sys.exit without importing sys, which turned a
# friendly error message into a NameError.
import sys

try:
    import scipy
except ImportError:
    # The parenthesized print form behaves identically on Python 2 and 3
    # for a single string argument.
    print("Error: could not import scipy (http://www.scipy.org/)")
    sys.exit(1)

try:
    import pylab
except ImportError:
    print("Error: could not import pylab (http://matplotlib.sourceforge.net/)")
    sys.exit(1)
class example_fll(gr.top_block):
    """GNU Radio flowgraph: RRC-shaped BPSK through a channel model into a
    band-edge FLL, with vector sinks capturing every probe stream."""
    def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
        gr.top_block.__init__(self)

        # Transmit pulse-shaping filter (root-raised cosine).
        rrc_taps = gr.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        # Random +/-1 BPSK symbols with a static phase offset applied.
        data = 2.0*scipy.random.randint(0, 2, N) - 1.0
        data = scipy.exp(1j*poffset) * data
        self.src = gr.vector_source_c(data.tolist(), False)
        self.rrc = gr.interp_fir_filter_ccf(sps, rrc_taps)
        # Channel impairments: AWGN, frequency offset, timing offset.
        self.chn = gr.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        # Sinks: shaped source, FLL output, and the FLL's frequency /
        # phase / error probe outputs (ports 1-3).
        self.vsnk_src = gr.vector_sink_c()
        self.vsnk_fll = gr.vector_sink_c()
        self.vsnk_frq = gr.vector_sink_f()
        self.vsnk_phs = gr.vector_sink_f()
        self.vsnk_err = gr.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll,1), self.vsnk_frq)
        self.connect((self.fll,2), self.vsnk_phs)
        self.connect((self.fll,3), self.vsnk_err)
def main():
    # Parse the command-line options describing the simulated link and the
    # FLL's configuration.
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=2000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("-S", "--sps", type="int", default=4,
                      help="Set the samples per symbol [default=%default]")
    parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
                      help="Set the rolloff factor [default=%default]")
    parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
                      help="Set the loop bandwidth [default=%default]")
    parser.add_option("-n", "--ntaps", type="int", default=45,
                      help="Set the number of taps in the filters [default=%default]")
    parser.add_option("", "--noise", type="eng_float", default=0.0,
                      help="Set the simulation noise voltage [default=%default]")
    parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
                      help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
    parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
                      help="Set the simulation's timing offset [default=%default]")
    parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
                      help="Set the simulation's phase offset [default=%default]")
    (options, args) = parser.parse_args ()

    # Adjust N for the interpolation by sps
    options.nsamples = options.nsamples // options.sps

    # Set up and run the program-under-test flowgraph to completion.
    put = example_fll(options.nsamples, options.sps, options.rolloff,
                      options.ntaps, options.bandwidth, options.noise,
                      options.foffset, options.toffset, options.poffset)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_err = scipy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(2,2,1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2,2,2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols
    s3 = f1.add_subplot(2,2,3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2,2,4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C exit without a traceback.
        pass
| gpl-3.0 |
xzh86/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
winklerand/pandas | pandas/tests/io/msgpack/test_pack.py | 9 | 4922 | # coding: utf-8
import pytest
import struct
from pandas import compat
from pandas.compat import u, OrderedDict
from pandas.io.msgpack import packb, unpackb, Unpacker, Packer
class TestPack(object):
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack(self):
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536,
-1, -32, -33, -128, -129, -32768, -32769,
1.0,
b"", b"a", b"a" * 31, b"a" * 32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1 << 23),
]
for td in test_data:
self.check(td)
def testPackUnicode(self):
test_data = [u(""), u("abcd"), [u("defgh")], u("Русский текст"), ]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
assert re == td
packer = Packer(encoding='utf-8')
data = packer.pack(td)
re = Unpacker(
compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
assert re == td
def testPackUTF32(self):
test_data = [
compat.u(""),
compat.u("abcd"),
[compat.u("defgh")],
compat.u("Русский текст"),
]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
def testPackBytes(self):
test_data = [b"", b"abcd", (b"defgh", ), ]
for td in test_data:
self.check(td)
def testIgnoreUnicodeErrors(self):
re = unpackb(
packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore',
use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack(self):
pytest.raises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'),
encoding='utf-8', use_list=1)
def testStrictUnicodePack(self):
pytest.raises(UnicodeEncodeError, packb, compat.u("abc\xeddef"),
encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(self):
re = unpackb(
packb(
compat.u("abcФФФdef"), encoding='ascii',
unicode_errors='ignore'), encoding='utf-8', use_list=1)
assert re == compat.u("abcdef")
def testNoEncoding(self):
pytest.raises(TypeError, packb, compat.u("abc"), encoding=None)
def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat(self):
assert packb(1.0,
use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
assert packb(
1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
def testArraySize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(self, sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = compat.BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == {i: i * 2 for i in range(size)}
def test_odict(self):
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(
packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist(self):
    """pack_map_pairs preserves pair order when unpacked with a list hook."""
    pairs = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
    packed = Packer().pack_map_pairs(pairs)
    unpacked = unpackb(packed, object_pairs_hook=list)
    assert pairs == unpacked
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/naive_bayes.py | 128 | 28358 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like
        of shape [n_classes, n_samples].

        predict, predict_proba and predict_log_proba pass their input to
        this method unchanged.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        log_joint = self._joint_log_likelihood(X)
        # The class with the largest joint log likelihood wins.
        best = np.argmax(log_joint, axis=1)
        return self.classes_[best]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        log_joint = self._joint_log_likelihood(X)
        # normalize by the marginal P(x) = P(f_1, ..., f_n)
        normalizer = logsumexp(log_joint, axis=1)
        return log_joint - np.atleast_2d(normalizer).T

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.

    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.

    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.

    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class

    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB()
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """

    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # Delegate to the online-update path, discarding any previous fit
        # (_refit=True resets classes_ and all cumulative statistics).
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)

    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.

        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).

        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.

        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

            http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.

        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.

        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.

        X : array-like, shape (n_samples, n_features)
            New data points to fold into the statistics.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.

        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        if X.shape[0] == 0:
            return mu, var

        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)

        if n_past == 0:
            return new_mu, new_var

        n_total = float(n_past + n_new)

        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total

        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        # The correction term below is algebraically equivalent to
        # (n_past * n_new / n_total) * (mu - new_mu) ** 2.
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total

        return total_mu, total_var

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)

    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        _refit: bool
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)

        # Small constant added to all variances for numerical stability;
        # it is subtracted again at the start of each subsequent call so
        # the raw statistics stay exact across partial_fit calls.
        epsilon = 1e-9

        if _refit:
            self.classes_ = None

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))
            self.class_prior_ = np.zeros(n_classes)
            self.class_count_ = np.zeros(n_classes)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Remove the epsilon added at the end of the previous call;
            # it is re-added below after the statistics are updated.
            self.sigma_[:, :] -= epsilon

        classes = self.classes_

        unique_y = np.unique(y)
        unique_y_in_classes = in1d(unique_y, classes)

        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (y[~unique_y_in_classes], classes))

        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]

            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]

            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)

            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i

        self.sigma_[:, :] += epsilon
        self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
        return self

    def _joint_log_likelihood(self, X):
        # log P(c) + log P(x|c) under a per-class diagonal Gaussian model.
        check_is_fitted(self, "classes_")

        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # Per-sample Gaussian log density, summed over features.
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB
    """

    def _update_class_log_prior(self, class_prior=None):
        # Recompute ``class_log_prior_`` from an explicit prior, the
        # empirical class counts, or a uniform distribution.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_)
                                     - np.log(self.class_count_.sum()))
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        classes : array-like, shape = [n_classes]
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))

        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # Binary problems yield a single column; expand to one
            # indicator column per class so counting works uniformly.
            Y = np.concatenate((1 - Y, Y), axis=1)

        n_samples, n_classes = Y.shape

        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))

        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)

        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape

        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # Binary problems: expand the single indicator column to two.
            Y = np.concatenate((1 - Y, Y), axis=1)

        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)

    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)

    # Expose the linear-model view (coef_/intercept_) as read-only
    # properties mirroring the log probabilities.
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """
    Naive Bayes classifier for multinomial models

    Suitable for classification with discrete features (e.g., word counts
    for text classification). The multinomial distribution normally requires
    integer feature counts, but in practice fractional counts such as tf-idf
    may also work.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.

    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.

    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.

    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.

    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """

    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw feature and class counts from one batch."""
        values = X.data if issparse(X) else X
        if np.any(values < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Recompute smoothed per-class feature log probabilities."""
        smoothed_counts = self.feature_count_ + self.alpha
        smoothed_totals = smoothed_counts.sum(axis=1)
        self.feature_log_prob_ = (np.log(smoothed_counts)
                                  - np.log(smoothed_totals.reshape(-1, 1)))

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')
        return (safe_sparse_dot(X, self.feature_log_prob_.T)
                + self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data, but
    while MultinomialNB works with occurrence counts, BernoulliNB is
    designed for binary/boolean features.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    binarize : float or None, optional
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.

    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size=[n_classes,]
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).

    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).

    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.

    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw feature and class counts from one (binarized) batch."""
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Recompute smoothed per-class feature log probabilities."""
        smoothed_counts = self.feature_count_ + self.alpha
        # Two outcomes per Bernoulli feature, hence ``2 * alpha``.
        smoothed_totals = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_counts)
                                  - np.log(smoothed_totals.reshape(-1, 1)))

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')

        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)

        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape

        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))

        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob . (1 - X).T as sum(neg_prob) - X . neg_prob,
        # which avoids materializing (1 - X) for sparse inputs.
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)

        return jll
| bsd-3-clause |
goodfeli/pylearn2 | pylearn2/train_extensions/live_monitoring.py | 13 | 11530 | """
Training extension for allowing querying of monitoring values while an
experiment executes.
"""
__authors__ = "Dustin Webb"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Dustin Webb"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
try:
import zmq
zmq_available = True
except:
zmq_available = False
try:
import matplotlib.pyplot as plt
pyplot_available = True
except:
pyplot_available = False
from functools import wraps
from pylearn2.monitor import Monitor
from pylearn2.train_extensions import TrainExtension
class LiveMonitorMsg(object):
    """
    Base class defining the interface shared by all Live Monitor messages.
    """
    # Whether a response has been attached to this message.
    response_set = False

    def get_response(self):
        """
        Build the response message corresponding to this request message.
        Response messages themselves need not implement this method.
        """
        raise NotImplementedError('get_response is not implemented.')
class ChannelListResponse(LiveMonitorMsg):
    """
    A response message carrying the list of channels being monitored.
    """
    pass
class ChannelListRequest(LiveMonitorMsg):
    """
    A request message asking for the list of channels being monitored.
    """
    @wraps(LiveMonitorMsg.get_response)
    def get_response(self):
        # The matching response is a (not yet populated) channel list.
        return ChannelListResponse()
class ChannelsResponse(LiveMonitorMsg):
    """
    A response message carrying monitoring data for the specified channels.
    Data can cover all epochs or only selected ones.

    Parameters
    ----------
    channel_list : list
        A list of the channels for which data has been requested.
    start : int
        The starting epoch for which data should be returned.
    end : int
        The epoch after which data should be returned.
    step : int
        The number of epochs to be skipped between data points.
    """
    def __init__(self, channel_list, start, end, step=1):
        assert(isinstance(channel_list, list) and len(channel_list) > 0)
        self.channel_list = channel_list

        assert(start >= 0)
        self.start = start

        self.end = end

        assert(step > 0)
        self.step = step
class ChannelsRequest(LiveMonitorMsg):
    """
    A request message asking for data related to the specified channels.

    Parameters
    ----------
    channel_list : list
        A list of the channels for which data has been requested.
    start : int
        The starting epoch for which data should be returned.
    end : int
        The epoch after which data should be returned.
    step : int
        The number of epochs to be skipped between data points.
    """
    def __init__(self, channel_list, start=0, end=-1, step=1):
        assert(isinstance(channel_list, list) and len(channel_list) > 0)
        self.channel_list = channel_list

        assert(start >= 0)
        self.start = start

        self.end = end

        assert(step > 0)
        self.step = step

    @wraps(LiveMonitorMsg.get_response)
    def get_response(self):
        # Echo the request parameters into the matching response message.
        return ChannelsResponse(self.channel_list, self.start, self.end,
                                self.step)
class LiveMonitoring(TrainExtension):
    """
    A training extension for remotely monitoring and filtering the channels
    being monitored in real time. PyZMQ must be installed for this extension
    to work.

    Parameters
    ----------
    address : string
        The IP addresses of the interfaces on which the monitor should listen.
    req_port : int
        The port number to be used to service requests.
    pub_port : int
        The port number to be used to publish updates.
    """
    def __init__(self, address='*', req_port=5555, pub_port=5556):
        if not zmq_available:
            raise ImportError('zeromq needs to be installed to '
                              'use this module.')

        self.address = 'tcp://%s' % address

        assert(req_port != pub_port)

        assert(req_port > 1024 and req_port < 65536)
        self.req_port = req_port

        assert(pub_port > 1024 and pub_port < 65536)
        self.pub_port = pub_port

        address_template = self.address + ':%d'
        self.context = zmq.Context()

        self.req_sock = None
        if self.req_port > 0:
            self.req_sock = self.context.socket(zmq.REP)
            self.req_sock.bind(address_template % self.req_port)

        self.pub_sock = None
        if self.pub_port > 0:
            self.pub_sock = self.context.socket(zmq.PUB)
            # BUG FIX: the PUB socket itself must be bound to the publish
            # port; previously req_sock was (re-)bound here, leaving
            # pub_sock unbound.
            self.pub_sock.bind(address_template % self.pub_port)

        # Tracks the number of times on_monitor has been called
        self.counter = 0

    @wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        monitor = Monitor.get_monitor(model)

        try:
            rsqt_msg = self.req_sock.recv_pyobj(flags=zmq.NOBLOCK)

            # Determine what type of message was received
            rsp_msg = rsqt_msg.get_response()

            if isinstance(rsp_msg, ChannelListResponse):
                rsp_msg.data = list(monitor.channels.keys())

            if isinstance(rsp_msg, ChannelsResponse):
                channel_list = rsp_msg.channel_list
                if (
                    not isinstance(channel_list, list)
                    or len(channel_list) == 0
                ):
                    channel_list = []
                    # BUG FIX: this error object used to be unconditionally
                    # overwritten by ``result = {}`` below, so the client
                    # never saw the TypeError. Keep it in an else branch.
                    result = TypeError(
                        'ChannelResponse requires a list of channels.'
                    )
                else:
                    result = {}

                for channel_name in channel_list:
                    if channel_name in monitor.channels.keys():
                        chan = copy.deepcopy(
                            monitor.channels[channel_name]
                        )

                        end = rsp_msg.end
                        if end == -1:
                            end = len(chan.batch_record)
                        # TODO copying and truncating the records individually
                        # like this is brittle. Is there a more robust
                        # solution?
                        window = slice(rsp_msg.start, end, rsp_msg.step)
                        chan.batch_record = chan.batch_record[window]
                        chan.epoch_record = chan.epoch_record[window]
                        chan.example_record = chan.example_record[window]
                        chan.time_record = chan.time_record[window]
                        chan.val_record = chan.val_record[window]
                        result[channel_name] = chan
                    else:
                        # BUG FIX: ``rsp_msg.channel`` does not exist; report
                        # the offending name from the loop variable instead.
                        result[channel_name] = KeyError(
                            'Invalid channel: %s' % channel_name
                        )

                rsp_msg.data = result

            self.req_sock.send_pyobj(rsp_msg)
        except zmq.Again:
            # No pending request this epoch; nothing to do.
            pass

        self.counter += 1
class LiveMonitor(object):
    """
    A utility class for requesting data from a LiveMonitoring training
    extension.

    Parameters
    ----------
    address : string
        The IP address on which a LiveMonitoring process is listening.
    req_port : int
        The port number on which a LiveMonitoring process is listening.
    """
    def __init__(self, address='127.0.0.1', req_port=5555):
        if not zmq_available:
            raise ImportError('zeromq needs to be installed to '
                              'use this module.')

        self.address = 'tcp://%s' % address

        assert(req_port > 0)
        self.req_port = req_port

        self.context = zmq.Context()
        self.req_sock = self.context.socket(zmq.REQ)
        self.req_sock.connect(self.address + ':' + str(self.req_port))

        # Maps channel name -> channel data retrieved so far.
        self.channels = {}

    def list_channels(self):
        """
        Returns a list of the channels being monitored.
        """
        self.req_sock.send_pyobj(ChannelListRequest())
        return self.req_sock.recv_pyobj()

    def update_channels(self, channel_list, start=-1, end=-1, step=1):
        """
        Retrieves data for a specified set of channels and combines that data
        with any previously retrieved data.

        This assumes all the channels have the same number of values. It is
        unclear as to whether this is a reasonable assumption. If they do not
        have the same number of values then it may request too much or too
        little data leading to duplicated data or holes in the data
        respectively. This could be made more robust by making a call to
        retrieve all the data for all of the channels.

        Parameters
        ----------
        channel_list : list
            A list of the channels for which data should be requested.
        start : int
            The starting epoch for which data should be requested; -1 means
            "resume after the last epoch already retrieved".
        end : int
            The epoch after which data should no longer be requested.
        step : int
            The number of epochs to be skipped between data points.
        """
        assert((start == -1 and end == -1) or end > start)

        if start == -1:
            start = 0
            if len(self.channels.keys()) > 0:
                # Resume from where the previously retrieved data ends.
                channel_name = list(self.channels.keys())[0]
                start = len(self.channels[channel_name].epoch_record)

        self.req_sock.send_pyobj(ChannelsRequest(
            channel_list, start=start, end=end, step=step
        ))

        rsp_msg = self.req_sock.recv_pyobj()

        if isinstance(rsp_msg.data, Exception):
            raise rsp_msg.data

        for channel in rsp_msg.data.keys():
            rsp_chan = rsp_msg.data[channel]

            if isinstance(rsp_chan, Exception):
                raise rsp_chan

            if channel not in self.channels.keys():
                self.channels[channel] = rsp_chan
            else:
                # Append the new records to the data already retrieved.
                chan = self.channels[channel]
                chan.batch_record += rsp_chan.batch_record
                chan.epoch_record += rsp_chan.epoch_record
                chan.example_record += rsp_chan.example_record
                chan.time_record += rsp_chan.time_record
                chan.val_record += rsp_chan.val_record

    def follow_channels(self, channel_list):
        """
        Tracks and plots a specified set of channels in real time.

        Parameters
        ----------
        channel_list : list
            A list of the channels for which data has been requested.
        """
        if not pyplot_available:
            raise ImportError('pyplot needs to be installed for '
                              'this functionality.')
        plt.clf()
        plt.ion()
        while True:
            # BUG FIX: the method is named update_channels (plural); the
            # previous call to the non-existent ``update_channel`` raised
            # AttributeError on the first iteration.
            self.update_channels(channel_list)
            plt.clf()
            for channel_name in self.channels:
                plt.plot(
                    self.channels[channel_name].epoch_record,
                    self.channels[channel_name].val_record,
                    label=channel_name
                )
            plt.legend()
            plt.ion()
            plt.draw()
| bsd-3-clause |
fbagirov/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Plot the first ``n_row * n_col`` images as an h-by-w portrait grid."""
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    n_cells = n_row * n_col
    for idx in range(n_cells):
        plt.subplot(n_row, n_col, idx + 1)
        # Each flattened image vector is restored to its 2-D pixel layout.
        plt.imshow(images[idx].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[idx], size=12)
        # Hide tick marks on both axes.
        plt.xticks(())
        plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
    """Build a two-line 'predicted vs. true' caption for test sample *i*."""
    def last_name(label):
        # Keep only the final word of the person's full name.
        return target_names[label].rsplit(' ', 1)[-1]

    return 'predicted: %s\ntrue: %s' % (last_name(y_pred[i]),
                                        last_name(y_test[i]))
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
rishikksh20/scikit-learn | examples/manifold/plot_compare_methods.py | 52 | 3878 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
shnizzedy/SM_openSMILE | openSMILE_preprocessing/arff_csv_to_pandas.py | 1 | 6508 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
arff_csv_to_pandas.py
Functions to import openSMILE outputs to Python pandas.
Authors:
– Jon Clucas, 2016 (jon.clucas@childmind.org)
– Arno Klein, 2016 (arno.klein@childmind.org)
– Bonhwang Koo, 2016 (bonhwang.koo@childmind.org)
© 2016, Child Mind Institute, Apache v2.0 License
Created on Thu Dec 8 10:43:37 2016
@author: jon.clucas
"""
import arff, csv, os, pandas as pd, subprocess
def main():
    """Entry-point placeholder; this module is used as a library of helpers."""
    pass
def arff_to_pandas(arff_data, method, config_file, condition):
    """
    Convert a parsed arff data structure into a pandas Series.

    Parameters
    ----------
    arff_data : dict
        arff data as returned by ``arff.load``: a mapping with an
        "attributes" list of ``(name, type)`` pairs and a "data" list of
        rows. Only the first data row is used.
    method : string
        ["clone_all", "replaced_clone", "replaced_brownian", "replaced_pink",
        "replaced_stretch", "replaced_white", "replaced_timeshift",
        "silenced", "original"]
    config_file : string
        openSMILE configuration file filename
    condition : string
        ["ambient", "noise", "only_ambient_noise"]

    Returns
    -------
    oS_series : pandas Series
        The first data row, indexed by attribute name and named
        "<config_file> > <condition> > <method>".
    """
    # Attribute names become the Series index. (Fixes the misspelled local
    # "indicies" and replaces the manual append loop with a comprehension.)
    indices = [attribute[0] for attribute in arff_data["attributes"]]
    return pd.Series(arff_data["data"][0], indices,
                     name=" > ".join([config_file, condition, method]))
def build_dataframe(wd, config_file, condition, methods):
    """
    Assemble the openSMILE outputs for one config/condition into a DataFrame.

    Starts from the unmodified ("original") recording, then joins one column
    per manipulation method, silently skipping methods whose csv file does
    not exist on disk.

    Parameters
    ----------
    wd : string
        working directory
    config_file : string
        openSMILE configuration file filename
    condition : string
        ["ambient", "noise", "only_ambient_noise"]
    methods : list
        ["clone_all", "replaced_clone", "replaced_brownian", "replaced_pink",
        "replaced_stretch", "replaced_white", "replaced_timeshift",
        "silenced"]

    Returns
    -------
    d : pandas DataFrame
        a dataframe for the relevant set of files and features
        (rows = method series, columns = features)
    """
    only_ambient = condition == 'only_ambient_noise'
    # The unmodified recording anchors the frame.
    if only_ambient:
        base_csv = os.path.join(wd, config_file,
                                "only_ambient_noise_original.csv")
    else:
        base_csv = os.path.join(wd, config_file, "full_original.csv")
    d = get_oS_data(base_csv, "original", config_file, condition).to_frame()
    for method in methods:
        if only_ambient:
            fname = "".join([condition, "_", method, ".csv"])
        else:
            fname = "".join(["full_", condition, "_", method, ".csv"])
        try:
            s = get_oS_data(os.path.join(wd, config_file, condition, fname),
                            method, config_file, condition)
            d = d.join(s.to_frame())
        except FileNotFoundError:
            # Missing method files are simply left out of the frame.
            pass
    # Transpose so each fetched series becomes a row.
    d = d.T
    # Convert numeric strings to numeric data where possible.
    d = d.apply(pd.to_numeric, errors='ignore')
    return d
def get_oS_data(csvpath, method, config_file, condition):
    """
    Load one openSMILE output file into a pandas Series.

    The file is parsed as arff; two known corruptions are repaired on the
    fly: a spurious leading index column (``arff.BadLayout``) and "unknown"
    attribute types (``arff.BadAttributeType``).

    Parameters
    ----------
    csvpath : string
        absolute path to csv file
    method : string
        ["clone_all", "replaced_clone", "replaced_brownian", "replaced_pink",
        "replaced_stretch", "replaced_white", "replaced_timeshift",
        "silenced", "original"]
    config_file : string
        openSMILE configuration file filename
    condition : string
        ["ambient", "noise", "only_ambient_noise"]

    Returns
    -------
    oS_series : pandas series
    """
    try:
        oS_data = arff.load(open(csvpath))
    except arff.BadLayout:
        # Strip the spurious index column, write the repaired text to a
        # temporary file, and re-parse that file.
        pieces = []
        with open(csvpath, 'r') as csvfile:
            reader = csv.reader(csvfile)
            prev_index = 0
            label = ''
            for row in reader:
                if label != '@data':
                    # Header section: drop values that look like a running
                    # index (empty, zero, or previous index + 1).
                    no_index = len(row[0]) == 0
                    if (no_index or int(row[0]) == 0 or
                            int(row[0]) == int(prev_index) + 1):
                        pieces.append(row[1])
                        pieces.append("\n")
                    else:
                        pieces.append(row[0])
                        pieces.append(row[1])
                    if not no_index:
                        prev_index = row[0]
                    label = row[1][:5]
                else:
                    # Data section: values are re-joined with commas.
                    pieces.append(row[1])
                    pieces.append(',')
        # Drop the trailing separator appended after the last value.
        temp_data = "".join(pieces)[:-1]
        tempcsv = "temp.csv"
        tof = open(tempcsv, "w")
        tof.write(temp_data)
        tof.close()
        oS_data = replace_unknown(tempcsv)
    except arff.BadAttributeType:
        # Re-parse after replacing "unknown" attribute types with "string".
        oS_data = replace_unknown(csvpath)
    return arff_to_pandas(oS_data, method, config_file, condition)
def replace_unknown(arff_path):
    """
    Re-parse an arff file after replacing "unknown" attribute types.

    Rewrites every ``@attribute <name> unknown`` declaration to use the
    "string" type (which the arff parser accepts), writes the result to a
    temporary file, reloads it, and cleans the temporary file up.

    Parameters
    ----------
    arff_path : string
        absolute path to csv file

    Returns
    -------
    oS_data : dict
        parsed arff data structure (as returned by ``arff.load``)
    """
    # Read the whole file; the handle is now closed deterministically
    # (the original left it open).
    with open(arff_path, 'r') as temp_oS:
        temp_oS_lines = temp_oS.readlines()
    temp_oS_string = ""
    for temp_oS_line in temp_oS_lines:
        words = temp_oS_line.split()
        # Only three-token attribute declarations can carry the bad type.
        if(len(words) == 3):
            if ((words[0] == "@attribute") and (words[2] == "unknown")):
                temp_oS_string = "".join([temp_oS_string,
                                 " ".join([words[0], words[1],
                                           "string\n"])])
            else:
                temp_oS_string = "".join([temp_oS_string, temp_oS_line])
        else:
            temp_oS_string = "".join([temp_oS_string, temp_oS_line])
    tempcsv = "temp.csv"
    with open(tempcsv, "w") as tof:
        tof.write(temp_oS_string)
    # Bug fix: arff.loads expects a *string*; passing an open file object
    # fails. Use arff.load on a file object, consistent with get_oS_data.
    with open(tempcsv) as tempfile_handle:
        oS_data = arff.load(tempfile_handle)
    # Portable cleanup instead of shelling out to "rm" (Unix-only).
    os.remove(tempcsv)
    return(oS_data)
# ============================================================================
if __name__ == '__main__':
main() | apache-2.0 |
blaze/dask | dask/tests/test_distributed.py | 3 | 5475 | import pytest
distributed = pytest.importorskip("distributed")
import asyncio
from functools import partial
from operator import add
from tornado import gen
import dask
from dask import persist, delayed, compute
from dask.delayed import Delayed
from dask.utils import tmpdir, get_named_args
from distributed import futures_of
from distributed.client import wait
from distributed.utils_test import ( # noqa F401
gen_cluster,
inc,
cluster,
cluster_fixture,
loop,
client as c,
)
if "should_check_state" in get_named_args(gen_cluster):
gen_cluster = partial(gen_cluster, should_check_state=False)
cluster = partial(cluster, should_check_state=False)
def test_can_import_client():
from dask.distributed import Client # noqa: F401
@gen_cluster(client=True)
def test_persist(c, s, a, b):
x = delayed(inc)(1)
(x2,) = persist(x)
yield wait(x2)
assert x2.key in a.data or x2.key in b.data
y = delayed(inc)(10)
y2, one = persist(y, 1)
yield wait(y2)
assert y2.key in a.data or y2.key in b.data
def test_persist_nested(c):
    # NOTE(review): the client fixture `c` is immediately shadowed by the
    # local delayed object below; the client is only needed implicitly as
    # the active scheduler.
    a = delayed(1) + 5
    b = a + 1
    c = a + 2
    # persist() should traverse containers and replace each delayed leaf
    # with a persisted Delayed while leaving plain values untouched.
    result = persist({"a": a, "b": [1, 2, b]}, (c, 2), 4, [5])
    assert isinstance(result[0]["a"], Delayed)
    assert isinstance(result[0]["b"][2], Delayed)
    assert isinstance(result[1][0], Delayed)
    sol = ({"a": 6, "b": [1, 2, 7]}, (8, 2), 4, [5])
    assert compute(*result) == sol
    # With traverse=False, containers are passed through unpersisted.
    res = persist([a, b], c, 4, [5], traverse=False)
    assert res[0][0] is a
    assert res[0][1] is b
    assert res[1].compute() == 8
    assert res[2:] == (4, [5])
def test_futures_to_delayed_dataframe(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3]})
futures = c.scatter([df, df])
ddf = dd.from_delayed(futures)
dd.utils.assert_eq(ddf.compute(), pd.concat([df, df], axis=0))
with pytest.raises(TypeError):
ddf = dd.from_delayed([1, 2])
def test_futures_to_delayed_bag(c):
db = pytest.importorskip("dask.bag")
L = [1, 2, 3]
futures = c.scatter([L, L])
b = db.from_delayed(futures)
assert list(b) == L + L
def test_futures_to_delayed_array(c):
da = pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
np = pytest.importorskip("numpy")
x = np.arange(5)
futures = c.scatter([x, x])
A = da.concatenate(
[da.from_delayed(f, shape=x.shape, dtype=x.dtype) for f in futures], axis=0
)
assert_eq(A.compute(), np.concatenate([x, x], axis=0))
@gen_cluster(client=True)
def test_local_get_with_distributed_active(c, s, a, b):
with dask.config.set(scheduler="sync"):
x = delayed(inc)(1).persist()
yield gen.sleep(0.01)
assert not s.tasks # scheduler hasn't done anything
x = delayed(inc)(2).persist(scheduler="sync") # noqa F841
yield gen.sleep(0.01)
assert not s.tasks # scheduler hasn't done anything
def test_to_hdf_distributed(c):
from ..dataframe.io.tests.test_hdf import test_to_hdf
test_to_hdf()
@pytest.mark.parametrize(
"npartitions",
[
1,
pytest.param(
4,
marks=pytest.mark.xfail(reason="HDF not multi-process safe", strict=False),
),
pytest.param(
10,
marks=pytest.mark.xfail(reason="HDF not multi-process safe", strict=False),
),
],
)
def test_to_hdf_scheduler_distributed(npartitions, c):
from ..dataframe.io.tests.test_hdf import test_to_hdf_schedulers
test_to_hdf_schedulers(None, npartitions)
@gen_cluster(client=True)
def test_serializable_groupby_agg(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
result = ddf.groupby("y").agg("count")
yield c.compute(result)
def test_futures_in_graph(c):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute(scheduler="dask.distributed") == ((1 + 1) + (2 + 2)) + 10
def test_zarr_distributed_roundtrip():
da = pytest.importorskip("dask.array")
pytest.importorskip("zarr")
assert_eq = da.utils.assert_eq
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_in_memory_distributed_err(c):
da = pytest.importorskip("dask.array")
zarr = pytest.importorskip("zarr")
c = (1, 1)
a = da.ones((3, 3), chunks=c)
z = zarr.zeros_like(a, chunks=c)
with pytest.raises(RuntimeError):
a.to_zarr(z)
def test_scheduler_equals_client(c):
x = delayed(lambda: 1)()
assert x.compute(scheduler=c) == 1
assert c.run_on_scheduler(lambda dask_scheduler: dask_scheduler.story(x.key))
@gen_cluster(client=True)
async def test_await(c, s, a, b):
x = dask.delayed(inc)(1)
x = await x.persist()
assert x.key in s.tasks
assert a.data or b.data
assert all(f.done() for f in futures_of(x))
def test_local_scheduler():
async def f():
x = dask.delayed(inc)(1)
y = x + 1
z = await y.persist()
assert len(z.dask) == 1
asyncio.get_event_loop().run_until_complete(f())
| bsd-3-clause |
nicholasmalaya/arcanus | disputatio/routines/vanes/bottom.py | 2 | 4039 | #!/bin/py
#
# interpolate over data field for bottom vanes
#
#
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
radprime=3.0
radmin=0.6
def vf(t, x):
    """Vane vector field: unit travel direction at position x = (x, y).

    The blade angle decays from its hub value to zero at the outer radius,
    with a different hub angle and exponent on the downstream (x > 0) and
    upstream sides.  `t` is unused but required by the scipy ODE API.
    """
    hub_angle_front = 80 * np.pi / 180.0  # moving from 70 to 80
    hub_angle_back = 50 * np.pi / 180.0   # moving from 70 to 80
    r_inner = radmin
    r_outer = radprime - 0.1
    radius = np.sqrt(x[0] ** 2 + x[1] ** 2)
    azimuth = np.arctan2(x[1], x[0])
    frac = np.abs((radius - r_inner) / (r_outer - r_inner))
    if x[0] > 0:
        blade = -(hub_angle_front) * np.power(frac, 0.5) + hub_angle_front
    else:
        blade = -(hub_angle_back) * np.power(frac, 1.2) + hub_angle_back
    heading = azimuth + blade
    return np.array([-np.cos(heading), -np.sin(heading)])
def arr():
    """Integrate the vane vector field from 13 seeds on a launch circle and
    draw each resulting solution curve in red."""
    launch_radius = radprime - 0.1
    angles = np.linspace(0, 2 * np.pi, 13)
    seeds = np.stack((launch_radius * np.cos(angles),
                      launch_radius * np.sin(angles)), axis=-1)
    t_start, dt = 0, 0.01
    solver = ode(vf).set_integrator('vode', method='bdf', max_step=dt)
    for seed in seeds:
        # Fixed integration horizon (previously also computed from the
        # seed's radius; see repo history).
        t_end = radprime + 0.0
        trajectory = []
        solver.set_initial_value(seed, t_start)
        while solver.successful() and solver.t + dt < t_end:
            solver.integrate(solver.t + dt)
            trajectory.append(solver.y)
        curve = np.array(np.real(trajectory))
        plt.plot(curve[:, 0], curve[:, 1], color='red', lw=4.25)
#
# main function: execute
#
def main():
    """Render the bottom-tier vane layout figure and save it as png + pdf."""
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # Square plotting window, `half` metres in every direction.
    half = 5
    xmin, xmax = -half, half
    ymin, ymax = -half, half
    # Evaluate it on a grid (the grid itself is currently unused).
    nx, ny = 200, 200
    xx, yy = np.meshgrid(np.linspace(xmin, xmax, nx),
                         np.linspace(ymin, ymax, ny))
    # Draw the integrated vane solution curves.
    arr()
    plt.suptitle("SoV Configuration: Bottom Tier")
    plt.title("12 Vane")
    ax.set_xticks(np.arange(xmin, xmax, 1))
    ax.set_xticks(np.arange(xmin, xmax, 0.1), minor=True)
    ax.set_yticks(np.arange(ymin, ymax, 1))
    ax.set_yticks(np.arange(ymin, ymax, .1), minor=True)
    plt.xlim([xmin, xmax])
    plt.ylim([ymin, ymax])
    plt.xlabel('Streamwise (X) [Meters]')
    plt.ylabel('Spanwise (Y) [Meters]')
    plt.grid()
    # Dashed reference circles at the outer and inner vane radii.
    outer_circle = plt.Circle((0, 0), radprime, color='black',
                              linestyle='dashed', fill=False, linewidth=2)
    inner_circle = plt.Circle((0, 0), radmin, color='black',
                              linestyle='dashed', fill=False, linewidth=1)
    # Region labels.
    ax.text(-4.4, radprime, r'Upstream Side', fontsize=15)
    ax.text(2.6, radprime, r'Downstream Side', fontsize=15)
    # Blade-angle symbols.
    ax.text(-3.9, 0, r'$\phi^{b,u}$', fontsize=20, color='blue')
    ax.text(3.2, 0, r'$\phi^{b,d}$', fontsize=20, color='blue')
    ax.annotate(r'$\theta^{b,u}$', xy=(-0.2, 0), xytext=(-radprime, -radprime),
                arrowprops=dict(facecolor='black', shrink=0.05),
                color='blue', fontsize=20)
    ax.annotate(r'$\theta^{b,d}$', xy=(0.2, 0), xytext=(radprime, -radprime),
                arrowprops=dict(facecolor='black', shrink=0.05),
                color='blue', fontsize=20)
    # Outer radius marker.
    ax.annotate(r'$r^{b}_{max}$', xy=(-3.1, 0), xytext=(0.2, .15),
                arrowprops=dict(facecolor='black', shrink=0.05),
                color='blue', fontsize=20)
    fig = plt.gcf()
    fig.gca().add_artist(outer_circle)
    fig.gca().add_artist(inner_circle)
    plt.axes().set_aspect('equal', 'datalim')
    plt.savefig('interp_entire_bottom.png', dpi=500)
    plt.savefig('interp_entire_bottom.pdf', format='pdf', dpi=1000)
#
# EXECUTE
#
main()
#
# nick
# 4/28/16
#
| mit |
secimTools/SECIMTools | src/scripts/secimtools/anovaModules/volcano.py | 2 | 2710 | #Add-on packages
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
# Plotting packages
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_hist as hist
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def volcano(combo, results, oname, cutoff=2):
    """
    Plot volcano plots.

    For every pairwise contrast found in `results`, scatter all features
    (mean difference vs. -log10 p-value) in blue, re-draw the features whose
    -log10(p-value) exceeds `cutoff` in red, draw the horizontal cutoff
    line, and append the figure to a multi-page PDF.

    :Arguments:
        :type combo: dictionary
        :param combo: A dictionary of dictionaries with all possible pairwise
            combinations; mirrors the column headers in the results table.
            (Not consulted directly by this function.)

        :type results: pandas DataFrame
        :param results: table holding "diff_of_*" and "-log10_p-value_*"
            columns, one pair per contrast.

        :type oname: string
        :param oname: Name of the output file in pdf format.

        :type cutoff: int
        :param cutoff: The cutoff value for significance.

    :Returns:
        :rtype: None
        :returns: Writes a pdf file containing all plots.
    """
    cols = results.columns.tolist()
    # -log10 p-values keyed by contrast name (the final "_"-token).
    lpvals = {col.split("_")[-1]: results[col]
              for col in cols if col.startswith("-log10_p-value_")}
    # Differences of means, keyed the same way.
    difs = {col.split("_")[-1]: results[col]
            for col in cols if col.startswith("diff_of")}
    with PdfPages(oname) as pdf:
        for key in sorted(difs.keys()):
            figure = figureHandler(proj="2d")
            # All features in blue.
            scatter.scatter2D(x=list(difs[key]), y=list(lpvals[key]),
                              colorList=list('b'), ax=figure.ax[0])
            # Overplot the significant features in red.
            cutLpvals = lpvals[key][lpvals[key] > cutoff]
            if not cutLpvals.empty:
                cutDiff = difs[key][cutLpvals.index]
                scatter.scatter2D(x=list(cutDiff), y=list(cutLpvals),
                                  colorList=list('r'), ax=figure.ax[0])
            # Horizontal significance threshold.
            lines.drawCutoffHoriz(y=cutoff, ax=figure.ax[0])
            figure.formatAxis(
                axTitle=key, grid=False,
                yTitle="-log10(p-value) for Diff of treatment = {0}".format(
                    key),
                xTitle="Diff of treatment = {0}".format(key))
            figure.addToPdf(pdfPages=pdf)
| mit |
simon-pepin/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
    # With a linear kernel, kernel ridge regression is mathematically
    # equivalent to primal ridge regression without an intercept, so the
    # two models must produce (almost) identical predictions.
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
    # GMM must expose its parameters through the public trailing-underscore
    # attributes and validate covariance settings.
    n_components, n_features = 10, 4
    covariance_type = 'diag'
    g = mixture.GMM(n_components, covariance_type, random_state=rng)
    weights = rng.rand(n_components)
    weights /= weights.sum()
    means = rng.randint(-20, 20, (n_components, n_features))
    assert_true(g.n_components == n_components)
    assert_true(g.covariance_type == covariance_type)
    # Setting weights and means round-trips through the attributes.
    g.weights_ = weights
    assert_array_almost_equal(g.weights_, weights)
    g.means_ = means
    assert_array_almost_equal(g.means_, means)
    covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
    g.covars_ = covars
    assert_array_almost_equal(g.covars_, covars)
    # Invalid covariance shapes and unknown covariance types are rejected.
    assert_raises(ValueError, g._set_covars, [])
    assert_raises(ValueError, g._set_covars,
                  np.zeros((n_components - 2, n_features)))
    assert_raises(ValueError, mixture.GMM, n_components=20,
                  covariance_type='badcovariance_type')
class GMMTester():
    """Mixin holding the test battery shared by every GMM covariance type.
    Concrete subclasses pair this mixin with ``unittest.TestCase`` and set
    the ``covariance_type`` and ``model`` class attributes.
    """
    # DPGMM does not support setting means/covariances before fitting, so
    # its subclass may disable test_eval through this flag.
    do_test_eval = True
    def _setUp(self):
        # Reference mixture parameters (weights, means, and one covariance
        # per supported structure) used by the tests below.
        self.n_components = 10
        self.n_features = 4
        self.weights = rng.rand(self.n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
        # Admissible negative change of the training log likelihood; see
        # the comment in test_train for why it is slightly below zero.
        self.threshold = -0.5
        self.I = np.eye(self.n_features)
        # The squared terms / "+ 5 * I" keep every covariance matrix
        # comfortably positive-definite.
        self.covars = {
            'spherical': (0.1 + 2 * rng.rand(self.n_components,
                                             self.n_features)) ** 2,
            'tied': (make_spd_matrix(self.n_features, random_state=0)
                     + 5 * self.I),
            'diag': (0.1 + 2 * rng.rand(self.n_components,
                                        self.n_features)) ** 2,
            'full': np.array([make_spd_matrix(self.n_features, random_state=0)
                              + 5 * self.I for x in range(self.n_components)])}
    def test_eval(self):
        # Check that score_samples returns one log likelihood per sample
        # and responsibilities that sum to one and identify the generating
        # component.
        if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
        # covariances before fitting There is no way of fixing this
        # due to the variational parameters being more expressive than
        # covariance matrices
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = self.covars[self.covariance_type]
        g.weights_ = self.weights
        # gaussidx[k] is the component that generates sample k (5 samples
        # per component).
        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
        ll, responsibilities = g.score_samples(X)
        self.assertEqual(len(ll), n_samples)
        self.assertEqual(responsibilities.shape,
                         (n_samples, self.n_components))
        assert_array_almost_equal(responsibilities.sum(axis=1),
                                  np.ones(n_samples))
        assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
    def test_sample(self, n=100):
        # Check that sampling returns an array of the requested shape.
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        g.weights_ = self.weights
        samples = g.sample(n)
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_train(self, params='wmc'):
        # Check that EM training (approximately) monotonically increases
        # the log likelihood on data drawn from a known mixture.
        g = mixture.GMM(n_components=self.n_components,
                        covariance_type=self.covariance_type)
        g.weights_ = self.weights
        g.means_ = self.means
        g.covars_ = 20 * self.covars[self.covariance_type]
        # Create a training set by sampling from the predefined distribution.
        X = g.sample(n_samples=100)
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-1,
                       n_iter=1, init_params=params)
        g.fit(X)
        # Do one training iteration at a time so we can keep track of
        # the log likelihood to make sure that it increases after each
        # iteration.
        trainll = []
        for _ in range(5):
            g.params = params
            g.init_params = ''
            g.fit(X)
            trainll.append(self.score(g, X))
        g.n_iter = 10
        g.init_params = ''
        g.params = params
        g.fit(X)  # finish fitting
        # Note that the log likelihood will sometimes decrease by a
        # very small amount after it has more or less converged due to
        # the addition of min_covar to the covariance (to prevent
        # underflow). This is why the threshold is set to -0.5
        # instead of 0.
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > self.threshold,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, self.threshold, self.covariance_type, trainll))
    def test_train_degenerate(self, params='wmc'):
        # Train on degenerate data with 0 in some dimensions
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, self.n_features)
        X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-3, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        # The average per-feature log likelihood must stay bounded even on
        # rank-deficient data (min_covar prevents degenerate covariances).
        self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
    def test_train_1d(self, params='wmc'):
        # Train on 1-D data
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, 1)
        # X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-7, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        # DPGMM's variational bound is looser, hence the wider tolerance.
        if isinstance(g, mixture.DPGMM):
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
        else:
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
    def score(self, g, X):
        # Total log likelihood of X under the fitted model g.
        return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
    """Run the GMMTester battery with spherical covariance matrices."""
    model = mixture.GMM
    covariance_type = 'spherical'
    setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
    """Run the GMMTester battery with diagonal covariance matrices."""
    model = mixture.GMM
    covariance_type = 'diag'
    setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
    """Run the GMMTester battery with a tied covariance matrix."""
    model = mixture.GMM
    covariance_type = 'tied'
    setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
    """Run the GMMTester battery with full covariance matrices."""
    model = mixture.GMM
    covariance_type = 'full'
    setUp = GMMTester._setUp
def test_multiple_init():
    # Fitting with several random initializations must not score
    # (noticeably) worse than a single initialization.
    X = rng.randn(30, 5)
    X[:10] += 2
    gmm = mixture.GMM(n_components=2, covariance_type='spherical',
                      random_state=rng, min_covar=1e-7, n_iter=5)
    single_init_ll = gmm.fit(X).score(X).sum()
    gmm.n_init = 5
    multi_init_ll = gmm.fit(X).score(X).sum()
    assert_true(multi_init_ll >= single_init_ll - 1.e-2)
def test_n_parameters():
    # Each covariance structure implies a known number of free parameters;
    # check _n_parameters against hand-computed values.
    n_samples, n_dim, n_components = 7, 5, 2
    X = rng.randn(n_samples, n_dim)
    expected = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        model = mixture.GMM(n_components=n_components,
                            covariance_type=cv_type, random_state=rng,
                            min_covar=1e-7, n_iter=1)
        model.fit(X)
        assert_true(model._n_parameters() == expected[cv_type])
def test_1d_1component():
    # With 1-D data and a single component every covariance structure is
    # equivalent, so BIC must not depend on covariance_type.
    n_samples, n_dim, n_components = 100, 1, 1
    X = rng.randn(n_samples, n_dim)
    reference = mixture.GMM(n_components=n_components,
                            covariance_type='full', random_state=rng,
                            min_covar=1e-7, n_iter=1)
    reference.fit(X)
    reference_bic = reference.bic(X)
    for cv_type in ['tied', 'diag', 'spherical']:
        model = mixture.GMM(n_components=n_components,
                            covariance_type=cv_type, random_state=rng,
                            min_covar=1e-7, n_iter=1)
        model.fit(X)
        assert_array_almost_equal(model.bic(X), reference_bic)
def assert_fit_predict_correct(model, X):
    # fit(X).predict(X) and fit_predict(X) must induce identical
    # clusterings; ARI == 1.0 tolerates label permutations.
    clone = copy.deepcopy(model)
    labels_fit_then_predict = model.fit(X).predict(X)
    labels_fit_predict = clone.fit_predict(X)
    assert adjusted_rand_score(labels_fit_then_predict,
                               labels_fit_predict) == 1.0
def test_fit_predict():
    # Check that gmm.fit_predict is equivalent to gmm.fit + gmm.predict.
    # (Comment rather than a docstring for consistency with the rest of
    # this file: nose replaces the test name with the docstring in its
    # output, which makes failures harder to locate.)
    lrng = np.random.RandomState(101)
    n_samples, n_dim, n_comps = 100, 2, 2
    mu = np.array([[8, 8]])
    # Two well-separated gaussian blobs.
    component_0 = lrng.randn(n_samples, n_dim)
    component_1 = lrng.randn(n_samples, n_dim) + mu
    X = np.vstack((component_0, component_1))
    for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
        model = m_constructor(n_components=n_comps, covariance_type='full',
                              min_covar=1e-7, n_iter=5,
                              random_state=np.random.RandomState(0))
        assert_fit_predict_correct(model, X)
    # With n_iter=0 fit_predict falls back to the quick initialization,
    # which assigns every sample to component 0 here.
    model = mixture.GMM(n_components=n_comps, n_iter=0)
    z = model.fit_predict(X)
    assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
    # Compare the AIC/BIC reported by the model with values derived from
    # the entropy of a standard gaussian sample.
    n_samples, n_dim, n_components = 50, 3, 2
    X = rng.randn(n_samples, n_dim)
    entropy = 0.5 * (X.var() + np.log(2 * np.pi))  # standard gaussian entropy
    # Loop-invariant tolerance on the per-sample discrepancy.
    tolerance = n_dim * 3. / np.sqrt(n_samples)
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        model = mixture.GMM(n_components=n_components,
                            covariance_type=cv_type, random_state=rng,
                            min_covar=1e-7)
        model.fit(X)
        expected_aic = (2 * n_samples * entropy * n_dim +
                        2 * model._n_parameters())
        expected_bic = (2 * n_samples * entropy * n_dim +
                        np.log(n_samples) * model._n_parameters())
        assert_true(np.abs(model.aic(X) - expected_aic) / n_samples < tolerance)
        assert_true(np.abs(model.bic(X) - expected_bic) / n_samples < tolerance)
def check_positive_definite_covars(covariance_type):
    r"""Test that covariance matrices do not become non positive definite.

    Due to the accumulation of round-off errors, the computation of the
    covariance matrices during the learning phase could lead to non-positive
    definite covariance matrices. Namely the use of the formula:

    .. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T

    instead of:

    .. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T

    while mathematically equivalent, was observed a ``LinAlgError``
    exception, when computing a ``GMM`` with full covariance matrices and
    fixed mean.

    This function ensures that some later optimization will not introduce
    the problem again.
    """
    local_rng = np.random.RandomState(1)
    # Unbalanced 2d dataset: two components with respective weights
    # ~0.9 and ~0.1.
    X = local_rng.randn(100, 2)
    X[-10:] += (3, 3)  # Shift the 10 last points
    gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
                      min_covar=1e-3)
    # Non-regression test for issue #2640: this fit used to raise
    # numpy.linalg.linalg.LinAlgError (leading minor not positive definite).
    gmm.fit(X)
    if covariance_type in ("diag", "spherical"):
        assert_greater(gmm.covars_.min(), 0)
    else:
        covs = [gmm.covars_] if covariance_type == "tied" else gmm.covars_
        for cov in covs:
            assert_greater(np.linalg.det(cov), 0)
def test_positive_definite_covars():
    # Nose-style generator test: check positive definiteness for every
    # supported covariance type.
    for covariance_type in ("full", "tied", "diag", "spherical"):
        yield check_positive_definite_covars, covariance_type
def _check_verbose_fit(verbose):
    """Fit a small two-blob GMM at the given verbosity, silencing stdout.

    Only checks that the verbose code paths run without raising; the
    captured output itself is discarded.
    """
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, n_init=2, verbose=verbose)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        g.fit(X)
    finally:
        # Always restore stdout, even if fit raises.
        sys.stdout = old_stdout


def test_verbose_first_level():
    # verbose=1 prints coarse per-init progress; must not raise.
    _check_verbose_fit(1)


def test_verbose_second_level():
    # verbose=2 prints detailed per-iteration output; must not raise.
    _check_verbose_fit(2)
| bsd-3-clause |
Dapid/pywt | demo/benchmark.py | 2 | 1911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gc
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import pywt
# Benchmark pywt.dwt/idwt across input sizes and wavelets, then plot the
# best observed timings on log-log axes.
# Pick the best wall-clock timer per platform: time.clock historically had
# higher resolution on Windows, time.time elsewhere (Python 2 era idiom).
if sys.platform == 'win32':
    clock = time.clock
else:
    clock = time.time
# Input lengths to benchmark; the trailing slice drops the four largest
# (>= 10^6) sizes to keep the total run time reasonable.
sizes = [20, 50, 100, 120, 150, 200, 250, 300, 400, 500, 600, 750,
         1000, 2000, 3000, 4000, 5000, 6000, 7500,
         10000, 15000, 20000, 25000, 30000, 40000, 50000, 75000,
         100000, 150000, 200000, 250000, 300000, 400000, 500000,
         600000, 750000, 1000000, 2000000, 5000000][:-4]
# Daubechies, symlet and coiflet wavelets of increasing filter length.
wavelet_names = ['db1', 'db2', 'db3', 'db4', 'db5', 'db6', 'db7',
                 'db8', 'db9', 'db10', 'sym10', 'coif1', 'coif2',
                 'coif3', 'coif4', 'coif5']
wavelets = [pywt.Wavelet(n) for n in wavelet_names]
# Zero-padding signal extension mode, used for every transform below.
mode = pywt.MODES.zpd
# times_dwt[i][j] / times_idwt[i][j]: best observed time for wavelet i at
# input size sizes[j].
times_dwt = [[] for i in range(len(wavelets))]
times_idwt = [[] for i in range(len(wavelets))]
for j, size in enumerate(sizes):
    data = np.ones((size,), dtype=np.float64)
    # Progress indicator: "<index>/<total>  <size>".
    print((("%d/%d" % (j + 1, len(sizes))).rjust(6), str(size).rjust(9)))
    for i, w in enumerate(wavelets):
        min_t1, min_t2 = 9999., 9999.
        for _ in range(5):
            # Repeat timing 5 times to reduce run-to-run variation
            t1 = clock()
            (a, d) = pywt.dwt(data, w, mode)
            t1 = clock() - t1
            min_t1 = min(t1, min_t1)
            t2 = clock()
            a0 = pywt.idwt(a, d, w, mode)
            t2 = clock() - t2
            min_t2 = min(t2, min_t2)
        times_dwt[i].append(min_t1)
        times_idwt[i].append(min_t2)
    # Collect garbage between sizes so allocation churn from the previous
    # round does not pollute the next measurements.
    gc.collect()
# One log-log figure per transform direction (forward dwt, inverse idwt),
# with one curve per wavelet.
for j, (times, name) in enumerate([(times_dwt, 'dwt'), (times_idwt, 'idwt')]):
    fig = plt.figure(j)
    ax = fig.add_subplot(111)
    ax.set_title(name)
    for i, n in enumerate(wavelet_names):
        ax.loglog(sizes, times[i], label=n)
    ax.legend(loc='best')
    ax.set_xlabel('len(x)')
    ax.set_ylabel('time [s]')
plt.show()
| mit |
jbogaardt/chainladder-python | chainladder/adjustments/parallelogram.py | 1 | 4390 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from sklearn.base import BaseEstimator, TransformerMixin
from chainladder.core.io import EstimatorIO
class ParallelogramOLF(BaseEstimator, TransformerMixin, EstimatorIO):
    """
    Estimator to create and apply on-level factors to a Triangle object.
    This is commonly used for premium vectors expressed as a Triangle
    object.

    Parameters
    ----------
    rate_history : pd.DataFrame
        A DataFrame containing the rate change history.  It must include
        the columns named by ``change_col`` and ``date_col`` and may also
        contain any of the Triangle's ``key_labels`` columns, in which
        case on-level factors are computed separately for each group.
    change_col : str
        The column containing the rate changes expressed as a decimal. For
        example, 5% decrease should be stated as -0.05
    date_col : str
        The column containing the effective dates corresponding to each of
        the changes
    vertical_line : bool
        Rates are typically stated on an effective date basis and premiums
        on an earned basis. By default, this argument is False and produces
        parallelogram OLFs. If True, Parallelograms become squares. This is
        commonly seen in Workers Compensation with benefit on-leveling or
        if the premium origin is also stated on an effective date basis.

    Attributes
    ----------
    olf_ :
        A triangle representation of the on-level factors
    """
    def __init__(
        self, rate_history=None, change_col="", date_col="", vertical_line=False
    ):
        # Plain attribute assignment only, per the scikit-learn estimator
        # convention (no validation in __init__).
        self.rate_history = rate_history
        self.change_col = change_col
        self.date_col = date_col
        self.vertical_line = vertical_line
    def fit(self, X, y=None, sample_weight=None):
        """Fit the model with X.

        Computes ``olf_``, the latest-diagonal triangle of on-level
        factors aligned with the origin periods of ``X``.

        Parameters
        ----------
        X : Triangle-like
            Data to which the model will be applied.
        y : Ignored
        sample_weight : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        from chainladder.utils.utility_functions import parallelogram_olf, concat
        # The computations below are not supported on the sparse backend;
        # densify to numpy in that case, otherwise work on a copy.
        if X.array_backend == "sparse":
            obj = X.set_backend("numpy")
        else:
            obj = X.copy()
        # Key labels shared between the triangle and the rate history
        # determine whether OLFs are computed globally or per group.
        groups = list(set(X.key_labels).intersection(self.rate_history.columns))
        if len(groups) == 0:
            idx = obj
        else:
            idx = obj.groupby(groups).sum()
        # Arguments shared by every parallelogram_olf call: the full origin
        # span of X at its origin grain.
        kw = dict(
            start_date=X.origin[0].to_timestamp(how="s"),
            end_date=X.origin[-1].to_timestamp(how="e"),
            grain=X.origin_grain,
            vertical_line=self.vertical_line,
        )
        if len(groups) > 0:
            # Per-group path: build one OLF triangle per index row, then
            # stack them. NOTE(review): assumes idx.index is a DataFrame of
            # key-label values — confirm against the Triangle API.
            tris = []
            for item in idx.index.set_index(groups).iterrows():
                r = self.rate_history.set_index(groups).loc[item[0]].copy()
                # Compound multiple changes sharing an effective date into
                # a single net change: prod(1 + change) - 1.
                r[self.change_col] = r[self.change_col] + 1
                r = (r.groupby(self.date_col)[self.change_col].prod() - 1).reset_index()
                date = r[self.date_col]
                values = r[self.change_col]
                # [None, None] re-adds the index/column axes expected by
                # the 4D triangle broadcast below.
                olf = parallelogram_olf(values=values, date=date, **kw).values[
                    None, None
                ]
                if X.array_backend == "cupy":
                    olf = X.get_array_module().array(olf)
                # (idx * 0 + 1) is a ones-triangle used to broadcast the
                # OLF vector onto this group's triangle shape.
                tris.append((idx.loc[item[0]] * 0 + 1) * olf)
            self.olf_ = concat(tris, 0).latest_diagonal
        else:
            # Global path: a single OLF vector for the whole triangle.
            r = self.rate_history.copy()
            # Compound multiple changes sharing an effective date into a
            # single net change: prod(1 + change) - 1.
            r[self.change_col] = r[self.change_col] + 1
            r = (r.groupby(self.date_col)[self.change_col].prod() - 1).reset_index()
            date = r[self.date_col]
            values = r[self.change_col]
            olf = parallelogram_olf(values=values, date=date, **kw)
            self.olf_ = ((idx * 0 + 1) * olf.values[None, None]).latest_diagonal
        return self
    def transform(self, X, y=None, sample_weight=None):
        """ If X and self are of different shapes, align self to X, else
        return self.

        Parameters
        ----------
        X : Triangle
            The triangle to be transformed

        Returns
        -------
        X_new : New triangle with transformed attributes.
        """
        X_new = X.copy()
        # Attach the fitted on-level factors to the transformed triangle.
        triangles = ["olf_"]
        for item in triangles:
            setattr(X_new, item, getattr(self, item))
        # Rebuild slicing helpers so the new attribute participates in
        # triangle indexing.
        X_new._set_slicers()
        return X_new
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.