| repo_name (string, 7–90 chars) | path (string, 5–191 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 976–581k chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
| repo_name: nrhine1/scikit-learn | path: sklearn/datasets/twenty_newsgroups.py | copies: 126 | size: 13591 |
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays; instead, the returned dataset lists the filenames of the posts and
their categories as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
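A minimal usage sketch (illustrative only; the category list and `remove`
tuple below are arbitrary example arguments, not defaults):

    from sklearn.datasets import fetch_20newsgroups
    bunch = fetch_20newsgroups(subset='train',
                               categories=['sci.space', 'rec.autos'],
                               remove=('headers', 'footers', 'quotes'))
    # bunch.data is a list of raw posts, bunch.target their label indices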
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# The previous download did not complete, since the .tar.gz file is
# removed after a successful download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
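# An illustrative sketch of the two helpers above on a hypothetical post
# (not taken from the dataset):
#
#     post = "From: bob@example.com\nSubject: hi\n\nbob@example.com writes:\n> old\nnew text"
#     strip_newsgroup_header(post)   # -> "bob@example.com writes:\n> old\nnew text"
#     strip_newsgroup_quoting(post)  # -> drops the "writes:" and "> " lines, keeps the rest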
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
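Examples
--------
An illustrative sketch (the category names below are arbitrary choices,
not defaults)::

    cats = ['sci.med', 'sci.space']
    train = fetch_20newsgroups(subset='train', categories=cats)
    # train.target_names == cats, and train.target only contains the values
    # 0 and 1, remapped to be contiguous for the selected categories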
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
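Examples
--------
A minimal sketch (illustrative only)::

    bunch = fetch_20newsgroups_vectorized(subset='test')
    X, y = bunch.data, bunch.target  # scipy sparse matrix and label array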
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| license: bsd-3-clause |
| repo_name: dingocuster/scikit-learn | path: examples/bicluster/bicluster_newsgroups.py | copies: 162 | size: 7103 |
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
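# For example (a made-up string), number_aware_tokenizer("2 x 4GB DIMMs")
# returns ['#NUMBER', 'DIMMs']: tokens shorter than two characters are dropped
# by the pattern, and tokens starting with a digit are mapped to '#NUMBER'.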
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
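# normalized cut: weight of entries crossing the bicluster boundary
# (rows outside x cols inside, plus rows inside x cols outside) divided by
# the weight inside the bicluster, so smaller values indicate better biclusters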
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| license: bsd-3-clause |
| repo_name: readevalprint/zipline | path: tests/test_utils.py | copies: 7 | size: 2173 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from zipline.utils.factory import (load_from_yahoo,
load_bars_from_yahoo)
import pandas as pd
import pytz
import numpy as np
class TestFactory(TestCase):
def test_load_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=stocks, start=start, end=end)
assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.columns
np.testing.assert_raises(
AssertionError, load_from_yahoo, stocks=stocks,
start=end, end=start
)
def test_load_bars_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)
assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.items
for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
assert ohlc in data.minor_axis
np.testing.assert_raises(
AssertionError, load_bars_from_yahoo, stocks=stocks,
start=end, end=start
)
| license: apache-2.0 |
| repo_name: Sandia2014/intrepid | path: ArrayOfDotProducts/generatePlots.py | copies: 1 | size: 18894 |
import math
import os
import sys
import numpy
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
import csv
from mpl_toolkits.mplot3d import Axes3D
from numpy import log10
prefix = 'data/ArrayOfDotProducts_'
suffix = '_clearCache_shadowfax'
outputPrefix = 'figures/'
# read in all of the data.
# TODO: you'll need to disable everything that's not relevant here or it'll be angry about missing files
dotProductSize = numpy.loadtxt(open(prefix + 'dotProductSize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
memorySize = numpy.loadtxt(open(prefix + 'memorySize' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
numberOfDotProducts = numpy.loadtxt(open(prefix + 'numberOfDotProducts' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
serialTimes = numpy.loadtxt(open(prefix + 'serialTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
ompTimes = numpy.loadtxt(open(prefix + 'ompTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaIndependentTimes = numpy.loadtxt(open(prefix + 'cudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaReductionTimes = numpy.loadtxt(open(prefix + 'cudaReductionTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
cudaSwitchingTimes = numpy.loadtxt(open(prefix + 'cudaSwitchingTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosOmpTimes = numpy.loadtxt(open(prefix + 'kokkosOmpTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
kokkosCudaIndependentTimes = numpy.loadtxt(open(prefix + 'kokkosCudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0)
# set up a list of the times and names, for easy iteration later
# TODO: make this consistent with the files that you read in and/or care about
allTimes = []
allNames = []
# NOTE: if you are doing comparisons against serial time, it's assumed that the first entry in allTimes is serial
allTimes.append(serialTimes)
allNames.append('serial')
# NOTE: if you are doing comparisons against omp time, it's assumed that the second entry in allTimes is openmp. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(ompTimes)
allNames.append('omp')
# NOTE: if you are doing comparisons against cuda time, it's assumed that the third entry in allTimes is cuda. if you aren't doing those comparisons, you should go disable that portion of this script.
allTimes.append(cudaIndependentTimes)
allNames.append('cudaIndependent')
# there are no assumptions about the rest of the ordering
allTimes.append(cudaReductionTimes)
allNames.append('cudaReduction')
allTimes.append(cudaSwitchingTimes)
allNames.append('cudaSwitching')
allTimes.append(kokkosOmpTimes)
allNames.append('kokkosOmp')
allTimes.append(kokkosCudaIndependentTimes)
allNames.append('kokkosCudaIndependent')
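# Resulting ordering assumed by the comparison sections below:
#   0=serial, 1=omp, 2=cudaIndependent, 3=cudaReduction, 4=cudaSwitching,
#   5=kokkosOmp, 6=kokkosCudaIndependent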
# these are toggles for whether to make image files and whether to make orbit files for making movies
makeImageFiles = True
#makeImageFiles = False
makeOrbitFilesForMovies = True
#makeOrbitFilesForMovies = False
numberOfOrbitFrames = 100
#markerPool = ['-', '--', ':']
markerPool = ['-', '--']
colors = cm.gist_ncar(numpy.linspace(1, 0, len(allTimes)))
markers = []
for i in range(len(allTimes)):
markers.append(markerPool[i % len(markerPool)])
fig3d = plt.figure(0)
fig2d = plt.figure(1, figsize=(14, 6))
ax2d = plt.subplot(111)
box2d = ax2d.get_position()
ax2d.set_position([box2d.x0, box2d.y0, box2d.width * 0.60, box2d.height])
bbox_to_anchor2d = (1.87, 0.5)
# make an image of just the number of dot products
# TODO: you might want to make an image of the number of cells, so you'd adjust this.
fig3d = plt.figure(0)
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(numberOfDotProducts), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(numberOfDotProducts)')
plt.title('number of dot products')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NumberOfDotProducts' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# goal: make images showing just the raw times
# find the min and max values across all flavors so that the color scale is the same for each graph
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex]))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex]))])
# make the color scale
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
# for each time
for timesIndex in range(len(allTimes)):
# make a 3d plot
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(raw time) [seconds]')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' raw time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'RawTimes_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# make a 2D plot of all flavors, for the smallest and largest sizes of memory
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('raw times for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('raw time [seconds]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'RawTimes_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make plots that are normalized by memory size
maxValue = -10
minValue = 10
for timesIndex in numpy.arange(0, len(allTimes)):
maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex] / memorySize))])
minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex] / memorySize))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue)
for timesIndex in range(len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times / memorySize), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(normalized time [seconds / memorySize])')
ax.set_zlim([minValue, maxValue])
plt.title(name + ' normalized time')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'NormalizedTime_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
# possibly make orbit plots for movies
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/NormalizedTime_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedups over serial
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(1, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[0] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[0] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 1 so that i don't compare serial to serial
for timesIndex in numpy.arange(1, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[0] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over serial')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusSerial_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 0):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusSerial_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[0][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over serial for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
#plt.ylim([0, 6])
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusSerial_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# now make relative speedup over openmp
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(2, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[1] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[1] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 2 so that i don't compare serial or omp to omp
for timesIndex in numpy.arange(2, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[1] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 1):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[1][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over openmp for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusOmp_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# relative speedup over cudaIndependent
# TODO: you might disable this part
maxSpeedup = -10
minSpeedup = 10
for timesIndex in numpy.arange(3, len(allTimes)):
maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[2] / allTimes[timesIndex]))])
minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[2] / allTimes[timesIndex]))])
colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup)
# intentionally start at 3 so that i don't compare cuda or serial or omp to cuda
for timesIndex in numpy.arange(3, len(allTimes)):
fig3d = plt.figure(0)
plt.clf()
times = allTimes[timesIndex]
name = allNames[timesIndex]
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[2] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
surf.set_norm(colorNormalizer)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('log10(speedup) [unitless]')
ax.set_zlim([minSpeedup, maxSpeedup])
plt.title(name + ' speedup over cudaIndependent')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_' + name + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True and timesIndex > 2):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_' + name + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
fig2d = plt.figure(1)
for memorySizeIndex in [-1, 0]:
legendNames = []
plt.cla()
for timesIndex in range(len(allTimes)):
times = allTimes[timesIndex]
name = allNames[timesIndex]
plt.plot(dotProductSize[:, memorySizeIndex], allTimes[2][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2)
legendNames.append(name)
plt.xscale('log')
plt.yscale('log')
plt.title('speedup over cuda independent for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16)
plt.xlabel('dot product size', fontsize=16)
plt.ylabel('speedup [unitless]', fontsize=16)
plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]])
ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d)
if (makeImageFiles == True):
sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize'
filename = outputPrefix + 'VersusCudaIndependent_2d_' + sizeDescription + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
else:
plt.show()
# these graphs are essentially duplicates of ones made already, but with a linear scale instead of logarithmic (by request of carter).
# these graphs just compare kokkos omp versus openmp and kokkos cuda versus cuda
# omp
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[1] / allTimes[5]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos omp speedup over omp')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusOmp_kokkosOmp_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusOmp_kokkosOmp_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
# cuda
fig3d = plt.figure(0)
plt.clf()
ax = fig3d.gca(projection='3d')
ax.view_init(elev=0, azim=-111)
surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[2] / allTimes[6]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False)
plt.xlabel('log10(dotProductSize)')
plt.ylabel('log10(memorySize)')
ax.set_zlabel('speedup [unitless]')
plt.title('kokkos cuda speedup over cuda')
if (makeImageFiles == True):
ax.view_init(elev=2, azim=-23)
filename = outputPrefix + 'VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix
plt.savefig(filename + '.pdf')
print 'saved file to %s' % filename
if (makeOrbitFilesForMovies == True):
for frameIndex in range(numberOfOrbitFrames):
ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1))
filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix + '_%02d.pdf' % frameIndex
plt.savefig(filename)
print 'saved file to %s' % filename
else:
plt.show()
| license: mit |
| repo_name: tmthydvnprt/pfcompute | path: pf/io.py | copies: 1 | size: 21872 |
"""
io.py
Input and Output functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : tim@tmthydvnprt.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
from __future__ import division
import re
import os
import glob
import cStringIO
import numpy as np
import pandas as pd
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams
from pf.constants import DATE_RE
from pf.util import read_date_csv_file
################################################################################################################################
# Account Functions
################################################################################################################################
def clean_accounts(accounts=None):
"""
Clean account data for any reason the user finds necessary. This is called at the end of `read_in_accounts()`. This is
optional and may be overridden by the user.
After importing the module you may override the function with the following definition and assignment:
```
import pf.io
def clean_accounts(accounts):
# Do something cleaning here
return accounts
# Override user functions
pf.io.clean_accounts = clean_accounts
accounts = pf.io.read_in_accounts('/path/to/accounts/file')
```
"""
return accounts
def read_in_accounts(filepath=''):
"""
Read in the data file containing monthly account balances, credit card limits, and miscellaneous loans.
This should be a simple Excel file with sheets for each section, or the function should be overridden by the user.
The sheets should be in the following form:
```
Accounts sheet:
______________________________________________________________________________________________
| | Cash | Credit | Investment | Loan | Property |
|---------|---------------------|-------------------|------------|----------------|----------|
| | Ally Online Savings | BofA Cash Rewards | Motif | Student Loan 1 | Car |
|---------|---------------------|-------------------|------------|----------------|----------|
| 09/2016 | 1000.00 | -500.00 | 5000.00 | -10000.00 | 5000.00 |
|---------|---------------------|-------------------|------------|----------------|----------|
| 10/2016 | 2000.00 | -550.00 | 6000.00 | -9000.00 | 5000.00 |
|---------|---------------------|-------------------|------------|----------------|----------|
| 11/2016 | 3000.00 | -450.00 | 7000.00 | -8000.00 | 5000.00 |
|---------|---------------------|-------------------|------------|----------------|----------|
|_________|_____________________|___________________|____________|________________|__________|
Limits sheet: Loans sheet:
________________________________________________ ____________________________
| | Credit | Loan | | | Loan |
|---------|-------------------|----------------| |---------|----------------|
| | BofA Cash Rewards | Student Loan 1 | | | 401(k) Loan |
|---------|-------------------|----------------| |---------|----------------|
| 09/2016 | -5000.00 | -20000.00 | | 09/2016 | -20000.00 |
|---------|-------------------|----------------| |---------|----------------|
| 10/2016 | -5000.00 | -20000.00 | | 10/2016 | -19500.00 |
|---------|-------------------|----------------| |---------|----------------|
| 11/2016 | -6000.00 | -20000.00 | | 11/2016 | -19000.00 |
|---------|-------------------|----------------| |---------|----------------|
|_________|___________________|________________| |_________|________________|
Sales Tax sheet:
__________________________________________
| | Sales Tax | Sales Tax |
|---------|-------------|----------------|
| | Location 1 | Location 2 |
|---------|-------------|----------------|
| 08/2016 | 7.75% | |
|---------|-------------|----------------|
| 09/2016 | 7.75% | |
|---------|-------------|----------------|
| 10/2016 | | 8.00% |
|---------|-------------|----------------|
| 11/2016 | | 8.00% |
|---------|-------------|----------------|
|_________|_____________|________________|
```
Example:
```
accounts, limits, loan, taxes, salestax = read_in_accounts('/path/to/estate.xlsx')
```
"""
# Read in account data as excel
xlsx = pd.read_excel(
filepath,
sheetname=None,
index_col=0,
header=[0, 1]
)
# Separate out worksheet and fill NaNs
accounts = xlsx['Accounts'].fillna(0.0)
limits = xlsx['Limits'].fillna(0.0)
loan = xlsx['Loans'].fillna(0.0)
incometaxes = xlsx['Income Taxes'].fillna(0.0)
salestax = xlsx['Sales Tax'].fillna(0.0)
# Set Index to DatetimeIndex
accounts.index = pd.to_datetime(accounts.index, format='%m/%Y').to_period('M').to_timestamp('M')
limits.index = pd.to_datetime(limits.index, format='%m/%Y').to_period('M').to_timestamp('M')
loan.index = pd.to_datetime(loan.index, format='%m/%Y').to_period('M').to_timestamp('M')
incometaxes.index = pd.to_datetime(incometaxes.index, format='%m/%Y').to_period('M').to_timestamp('M')
salestax.index = pd.to_datetime(salestax.index, format='%m/%Y').to_period('M').to_timestamp('M')
# Set Index name for later
accounts.index.name = 'Date'
limits.index.name = 'Date'
loan.index.name = 'Date'
incometaxes.index.name = 'Date'
salestax.index.name = 'Date'
# Clean up account data by user
accounts = clean_accounts(accounts)
return (accounts, limits, loan, incometaxes, salestax)
################################################################################################################################
# Transaction Functions
################################################################################################################################
def clean_transactions(transactions=None):
"""
Clean transaction data for any reason the user finds necessary. This is called at the end of `read_in_transactions()` and
will always need to be overridden by the user.
After importing the module you may override the function with the following definition and assignment:
```
import pf.io
def clean_transactions(transactions):
# Do some cleaning here
return transactions
# Override user functions
pf.io.clean_transactions = clean_transactions
transactions = pf.io.read_in_transactions('/path/to/transactions/file')
"""
return transactions
def read_in_transactions(filepath='', labels=None):
"""
Read in the data file containing all transaction details. This should be a `csv` file of transactions (e.g. a `csv` exported
from mint.com), or the function should be overridden by the user.
The `csv` file should be in the following form:
```
Date, Description, Original Description, Amount, Transaction Type, Category, Account Name, Labels, Notes
2016-11-20, Trader Joe's, TRADER JOES #110, $23.40, debit, Groceries, Amex Blue Cash Everyday,,
...
```
Example:
```
transactions = read_in_transactions('/path/to/transactions.csv')
```
"""
# Read Transaction info
transactions = read_date_csv_file(filepath)
# Process labels
transactions['Labels'] = [{label for label in labels
if label in str(transaction)}
for transaction in transactions['Labels']]
# Set debit transactions as negative
debit_index = transactions['Transaction Type'] == 'debit'
transactions.loc[debit_index, 'Amount'] = -1.0 * transactions.loc[debit_index, 'Amount']
# Drop unnecessary columns
transactions = transactions.drop(['Notes', 'Transaction Type'], 1)
# Fill NaNs
transactions = transactions.fillna(0.0)
# Clean up transaction data by user
transactions = clean_transactions(transactions)
return transactions
################################################################################################################################
# Paycheck Functions
################################################################################################################################
def set_paycheck_sign(paychecks, paycheck_negative_categories):
"""
Set the sign of each paycheck category. Paycheck reading makes all values absolute for ease, so
negatively signed values need to be reset. `paycheck_negative_categories` is a list of column names.
"""
# Set sign of each column
sign = pd.Series(np.ones(len(paychecks.columns)), index=paychecks.columns)
sign[paycheck_negative_categories] = -1
paychecks = sign * paychecks
return paychecks
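# An illustrative sketch of set_paycheck_sign (the column names are hypothetical,
# not required by the library):
#
#     paychecks = set_paycheck_sign(paychecks, ['federal tax', '401k loan'])
#     # those two columns are multiplied by -1; all other columns are unchanged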
def standardize_paycheck(paychecks, categories):
"""
Convert user's paycheck categories into standardized categories used in the rest of processing.
Categories is a dictionary of user category keys with standard category values. Duplicate
standard categories will be summed. A standard category value of "drop" will ignore that user
category in resulting output.
Standard Categories should be:
salary
straight
overtime
bonus
paid time off
holiday
reimbursement
401k loan
pre tax retire
post tax retire
pre tax deductions
post tax deductions
total post tax
total pre tax
gross
state tax
federal tax
total tax
net
employer match
employer retire
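A hypothetical mapping (the user column names on the left are made up)::

    categories = {'Regular Pay': 'salary', 'OT Pay': 'overtime',
                  'Gym Reimbursement': 'drop'}
    std_paychecks = standardize_paycheck(paychecks, categories)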
"""
# Create empty DataFrame
std_paychecks = pd.DataFrame([], index=paychecks.index)
for col in paychecks.columns:
std_col = categories[col]
if std_col != "drop":
# If category already exists add to category
if std_col in std_paychecks.columns:
std_paychecks[std_col] += paychecks[col]
# Or create new category
else:
std_paychecks[std_col] = paychecks[col]
return std_paychecks
def paycheck_parser(paychecks_dict=None):
"""
User defined function to convert a dictionary of paycheck lists of lists into a DataFrame. You can replace the whole
function or use the default one as a starting point. This function is called from within `read_in_paychecks()`.
Necessary user defined parts are sectioned off with `#####`.
"""
#pylint: disable=too-many-branches,too-many-statements,too-many-locals
# User Defined Constants like precompiled regex
############################################################################################################################
check_date_re = re.compile(r'.*Check Date: +(.*?) +.*')
baserate_re = re.compile(r'(.*) Base Rate:')
top_re = re.compile(r'Total Gross')
earnings_re = re.compile(r'Earnings')
end_re = re.compile(r'Total:')
deductions_re = re.compile(r'Description')
############################################################################################################################
# Loop thru dictionary (these lines are generic)
paychecks = []
for _, paycheck_text in paychecks_dict.items():
# Parse data into dictionary of 'fields'
df = {}
# User Defined parsing 'flags' (useful to have different parsing logic for different sections)
# and internal or intermediate variables (useful for saving data across rows without putting them in the final dictionary)
########################################################################################################################
t = -1
t0 = -1
t1 = -1
t2 = -1
in_base = False
in_top = False
in_earnings = False
in_deductions = False
baserate = ''
earnings_table = []
taxes_table = []
pre_table = []
post_table = []
other_table = []
########################################################################################################################
lines = [line for line in paycheck_text.split('\n') if line.strip()]
# Process each row of paycheck
for line in lines:
line = str(line)
# User Defined Parser Logic, this will be totally dependent on the format of your paycheck pdf... good luck!
####################################################################################################################
if in_base:
number = float(line.strip().replace(',', ''))
df['Base Rate'] = number * 80.0 if baserate == 'Hourly' else number
in_base = False
elif in_top:
row = line.split()
row = [row[0]] + [float(r.replace(',', '')) for r in row[1:]]
try:
df['Total Gross'] = row[1]
df['Fed Taxable Gross'] = row[2]
df['OASDI Gross'] = row[3]
df['MEDI Gross'] = row[4]
df['Net Pay'] = row[5]
except:
pass
in_top = False
elif in_earnings:
end_earnings = end_re.search(line)
# Parse tables
earnings = line[:t]
taxes = line[t:]
row0 = [earnings[:e+1].split(' ')[-1].strip().replace(',', '') for e in earnings_table]
row1 = [taxes[:e+1].split(' ')[-1].strip().replace(',', '') for e in taxes_table]
key0 = earnings.split(' ')[0]
key1 = taxes.split(' ')[0]
if end_earnings:
in_earnings = False
key0 = key0.replace(':', '') + ' Pay'
key1 = key1.replace(':', '') + ' Tax'
if key0:
df[key0] = float(row0[2]) if row0[2] else 0.0
if key1:
df[key1] = float(row1[1]) if row1[1] else 0.0
elif in_deductions:
end_deductions = end_re.search(line)
# Parse tables
pre = line[:t1]
post = line[t1:t2]
other = line[t2:]
row0 = [pre[:e+1].split(' ')[-1].strip().replace(',', '') for e in pre_table]
row1 = [post[:e+1].split(' ')[-1].strip().replace(',', '') for e in post_table]
row2 = [other[:e+1].split(' ')[-1].strip().replace(',', '') for e in other_table]
key0 = pre.split(' ')[0]
key1 = post.split(' ')[0]
key2 = other.split(' ')[0]
if end_deductions:
in_deductions = False
key0 = key0.replace(':', '') + ' Before Tax'
key1 = key1.replace(':', '') + ' After Tax'
key2 = key2.replace(':', '') + ' Other Tax'
if key0:
df[key0] = float(row0[1]) if row0[1] else 0.0
if key1:
df[key1] = float(row1[1]) if row1[1] else 0.0
if key2:
df[key2] = float(row2[1]) if row2[1] else 0.0
# Regular parsing
else:
# Search for Strings
date_search = check_date_re.search(line)
baserate_search = baserate_re.search(line)
top_search = top_re.search(line)
earnings_search = earnings_re.search(line)
deductions_search = deductions_re.search(line)
# Set flags and parse out table positions
if date_search:
df['Date'] = pd.to_datetime(date_search.groups()[0])
if baserate_search:
baserate = baserate_search.groups()[0]
in_base = True
if top_search:
in_top = True
if earnings_search:
t = line.find('Taxes')
earnings = line[:t]
taxes = line[t:]
earnigs_enum_zip = enumerate(zip(earnings[:-2], earnings[1:-1], earnings[2:]))
taxes_enum_zip = enumerate(zip(taxes[:-2], taxes[1:-1], taxes[2:]))
earnings_table = [i for i, (c0, c1, c2) in earnigs_enum_zip if c0 != ' ' and c1 + c2 == ' ']
taxes_table = [i for i, (c0, c1, c2) in taxes_enum_zip if c0 != ' ' and c1 + c2 == ' ']
in_earnings = True
if deductions_search:
t0 = line.find('Description')
t1 = line.find('Description', t0 + 1)
t2 = line.find('Description', t1 + 1)
pre = line[:t1]
post = line[t1:t2]
other = line[t2:]
pre_enum_zip = enumerate(zip(pre[:-2], pre[1:-1], pre[2:]))
post_enum_zip = enumerate(zip(post[:-2], post[1:-1], post[2:]))
other_enum_zip = enumerate(zip(other[:-2], other[1:-1], other[2:]))
pre_table = [i for i, (c0, c1, c2) in pre_enum_zip if c0 != ' ' and c1 + c2 == ' ']
post_table = [i for i, (c0, c1, c2) in post_enum_zip if c0 != ' ' and c1 + c2 == ' ']
other_table = [i for i, (c0, c1, c2) in other_enum_zip if c0 != ' ' and c1 + c2 == ' ']
in_deductions = True
# Store paycheck fields in list
paychecks.append(df)
# Convert list of paycheck field dictionary to DataFrame
paycheck_df = pd.DataFrame(paychecks).set_index('Date')
# Up until now paychecks are not necessarily read or parsed in chronological order, so sort chronologically
paycheck_df = paycheck_df.sort_index()
return paycheck_df
def read_in_paychecks(filepaths='', password='', parser=paycheck_parser, cache=True):
"""
Read in all the paychecks from a directory full of PDFs and return a DataFrame. If a password is supplied, encrypted PDFs
*can* be read. PDFs are converted to text lines, which are assumed to be mostly tabular and converted to lists of lists
using multiple spaces as delimiters. Since PDFs are unstructured, the parsing function will almost definitely need to be
overridden by the user.
Note:
Assumes PDF file names contain date.
Example:
```
paychecks = read_in_paychecks('/path/to/paycheck/directory/*.pdf', password='secret', parser=paycheck_parser)
```
"""
# Get PDFs from directory and check for cached file
paycheckfiles = glob.glob(filepaths)
paycheck_cache_file = os.path.dirname(filepaths) + '.csv'
cached = os.path.exists(paycheck_cache_file)
# Read in cached file if it exists
if cache and cached:
paycheck_df = read_date_csv_file(paycheck_cache_file)
# Read paycheck data if need be (not cached or new paycheck)
if not cache or not cached or len(paycheckfiles) > len(paycheck_df):
# Read in paycheck data to dictionary
paycheck_dict = {}
for paycheckfile in paycheckfiles:
# Open a PDF file
fp = open(paycheckfile, 'rb')
# Get the date
date = DATE_RE.findall(paycheckfile)[0]
# Create string to put PDF
output = cStringIO.StringIO()
# Create a PDF parser object associated with the file object.
pdfparser = PDFParser(fp)
# Create a PDF document object that stores the document structure. Supply the password for initialization.
document = PDFDocument(pdfparser, password)
# Check if the document allows text extraction. If not, abort.
if not document.is_extractable:
raise PDFTextExtractionNotAllowed
# Create a PDF resource manager object that stores shared resources.
manager = PDFResourceManager()
# Create a PDF converter object.
converter = TextConverter(manager, output, laparams=LAParams())
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(manager, converter)
# Process each page contained in the document.
pages = list(PDFPage.create_pages(document))
interpreter.process_page(pages[0])
# Get text
text = output.getvalue()
# Close up file objects
pdfparser.close()
fp.close()
converter.close()
output.close()
# Add to dictionary
paycheck_dict[date] = text
# Parse paycheck data with user defined function
paycheck_df = parser(paycheck_dict)
# Enforce pennies
paycheck_df = paycheck_df.fillna(0.0).round(2)
if cache:
paycheck_df.to_csv(paycheck_cache_file)
return paycheck_df
| license: mit |
| repo_name: AnatolyPavlov/smart-battery-for-smart-energy-usage | path: src/price_data_London.py | copies: 1 | size: 1031 |
""" This script reads and transforms pricing data to pd.DataFrame as time-series"""
import pandas as pd
from datetime import timedelta
# Custom Modules:
from data_preprocessing import ExtractTimeSeries
from auxiliary_functions import print_process
def main():
df = pd.read_excel('../data/Tariffs.xlsx')
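# Replace the categorical tariff labels with prices (the numeric values come
# from this script; the unit is assumed here to be GBP per kWh)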
df.loc[df['Tariff'] == 'Low', 'Tariff'] = 0.0399
df.loc[df['Tariff'] == 'Normal', 'Tariff'] = 0.1176
df.loc[df['Tariff'] == 'High', 'Tariff'] = 0.6720
#
ets = ExtractTimeSeries(datetime_col='TariffDateTime', yt_col='Tariff')
df = ets.transform(df)
#
day = pd.to_datetime('2013-12-27').date()
next_day = day + timedelta(days=1)
df_out = df.query('index >= @day and index < @next_day')
df_out.columns=['Tariff (UK Pounds)']
#
print_process('Saving Post-Processed Data')
path_to_price = '../clean_data/price_data_London.csv'
df_out.to_csv(path_to_price)
print 'Tariff data saved into: {}'.format(path_to_price)
print
if __name__ == '__main__':
main()
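# Note (illustrative): df.query('index >= @day and index < @next_day') above
# uses pandas' query syntax, where '@name' refers to the local Python
# variables `day` and `next_day` defined just before the call. A toy
# equivalent on a hand-built frame would look like:
#
#     toy = pd.DataFrame({'Tariff': [0.1176, 0.0399]},
#                        index=pd.to_datetime(['2013-12-27 00:00',
#                                              '2013-12-28 12:00']))
#     toy.query('index >= @day and index < @next_day')  # keeps the first row only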
|
gpl-3.0
|
ngoix/OCRF
|
benchmarks/bench_tree.py
|
297
|
3617
|
"""
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
jkoelker/python-tradeking
|
tradeking/api.py
|
1
|
11467
|
# -*- coding: utf-8 -*-
import urllib.parse
import requests_oauthlib as roauth
import pandas as pd
from tradeking import utils
BASE_URL = 'https://api.tradeking.com/v1'
_DATE_KEYS = ('date', 'datetime', 'divexdate', 'divpaydt', 'timestamp',
'pr_date', 'wk52hidate', 'wk52lodate', 'xdate')
_FLOAT_KEYS = ('ask', 'bid', 'chg', 'cl', 'div', 'dollar_value', 'eps',
'hi', 'iad', 'idelta', 'igamma', 'imp_volatility', 'irho',
'itheta', 'ivega', 'last', 'lo', 'opn', 'opt_val', 'pchg',
'pcls', 'pe', 'phi', 'plo', 'popn', 'pr_adp_100', 'pr_adp_200',
'pr_adp_50', 'prbook', 'prchg', 'strikeprice', 'volatility12',
'vwap', 'wk52hi', 'wk52lo', 'yield')
_INT_KEYS = ('asksz', 'basis', 'bidsz', 'bidtick', 'days_to_expiration',
'incr_vl', 'openinterest', 'pr_openinterest', 'prem_mult', 'pvol',
'sho', 'tr_num', 'vl', 'xday', 'xmonth', 'xyear')
def _quotes_to_df(quotes):
if not isinstance(quotes, list):
quotes = [quotes]
df = pd.DataFrame.from_records(quotes, index='symbol')
for col in df.keys().intersection(_DATE_KEYS):
kwargs = {}
if col == 'timestamp':
kwargs['unit'] = 's'
try:
df[col] = pd.to_datetime(df[col], **kwargs)
except ValueError:
pass
for col in df.keys().intersection(_INT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('int', errors='ignore')
for col in df.keys().intersection(_FLOAT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('float', errors='ignore')
return df
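# Illustrative example (made-up values) of the coercion performed above:
# '$', ',' and '%' are stripped, and columns listed in _FLOAT_KEYS / _INT_KEYS
# are cast to numeric dtypes, with failures silently ignored.
#
#     raw = [{'symbol': 'AAPL', 'last': '$101.50', 'vl': '1,000'}]
#     df = _quotes_to_df(raw)
#     # df.loc['AAPL', 'last'] -> 101.5 (float), df.loc['AAPL', 'vl'] -> 1000 (int)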
# TODO(jkoelker) Would be nice to do a proper DSL
class OptionQuery(object):
FIELDS = ('strikeprice', 'xdate', 'xmonth', 'xyear', 'put_call', 'unique')
OPS = {'<': 'lt', 'lt': 'lt',
'>': 'gt', 'gt': 'gt',
'>=': 'gte', 'gte': 'gte',
'<=': 'lte', 'lte': 'lte',
'=': 'eq', '==': 'eq', 'eq': 'eq'}
def __init__(self, query):
if isinstance(query, str):
query = [query]
self._query = []
for part in query:
field, op, value = part.split()
field = field.lower()
if field not in self.FIELDS or op not in self.OPS:
continue
if field == 'xdate':
value = pd.to_datetime(value).strftime('%Y%m%d')
self._query.append((field, self.OPS[op], value))
def __str__(self):
return ' AND '.join(['%s-%s:%s' % (field, op, value)
for field, op, value in self._query])
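# Illustrative example of the mini query language: each clause is
# "<field> <op> <value>", ops are translated through OPS, and clauses are
# joined with AND when rendered for the API.
#
#     q = OptionQuery(['strikeprice >= 100', 'put_call = put'])
#     str(q)  # -> 'strikeprice-gte:100 AND put_call-eq:put'
#
# An 'xdate' clause has its value normalized to YYYYMMDD via pd.to_datetime.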
class API(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = roauth.OAuth1Session(client_key=consumer_key,
client_secret=consumer_secret,
resource_owner_key=oauth_token,
resource_owner_secret=oauth_secret)
def join(self, *paths, **kwargs):
if len(paths) == 1:
paths = paths[0]
if kwargs.get('clean', True):
paths = [p.rstrip('/') for p in paths]
return '/'.join(paths)
def request(self, method, url, format='json', decode=True, **kwargs):
if format:
url = '.'.join((url, format))
r = self._api.request(method, url, **kwargs)
if decode:
r = r.json()
return r
def get(self, url, format='json', decode=True, **kwargs):
return self.request('GET', url=url, format=format, decode=decode,
**kwargs)
def post(self, url, format='json', decode=True, **kwargs):
return self.request('POST', url=url, format=format, decode=decode,
**kwargs)
class Account(object):
def __init__(self, api, account_id):
self._api = api
self.account_id = account_id
def _get(self, what=None, **kwargs):
params = [BASE_URL, 'accounts', self.account_id]
if what is not None:
params.append(what)
path = self._api.join(params)
return self._api.get(path, **kwargs)
def _balances(self, **kwargs):
return self._get('balances', **kwargs)
def _history(self, date_range='all', transactions='all', **kwargs):
params = {'range': date_range, 'transactions': transactions}
return self._get('history', params=params, **kwargs)
def _holdings(self, **kwargs):
return self._get('holdings', **kwargs)
def _orders(self, **kwargs):
return self._get('orders', **kwargs)
@property
def balances(self):
r = self._balances()
return r['response']['accountbalance']
def history(self, date_range='all', transactions='all'):
r = self._history(date_range=date_range, transactions=transactions)
return r['response']['transactions']['transaction']
@property
def holdings(self):
r = self._holdings()
return r['response']['accountholdings']['holding']
# TODO(jkoelker)
def order(self, order, preview=True):
pass
@property
def orders(self):
r = self._orders()
return r['response']['orderstatus']
class News(object):
def __init__(self, api):
self._api = api
def _article(self, article_id, **kwargs):
path = self._api.join(BASE_URL, 'market', 'news', article_id)
return self._api.get(path, **kwargs)
def _search(self, keywords=None, symbols=None, maxhits=None,
startdate=None, enddate=None, **kwargs):
if not keywords and not symbols:
raise ValueError('Either keywords or symbols are required')
data = {}
if keywords:
if isinstance(keywords, str):
keywords = [keywords]
data['keywords'] = ','.join(keywords)
if symbols:
if isinstance(symbols, str):
symbols = [symbols]
data['symbols'] = ','.join(symbols)
if maxhits:
data['maxhits'] = maxhits
# TODO(jkoelker) calculate enddate to be now()
if (not startdate and enddate) or (not enddate and startdate):
raise ValueError('Both startdate and enddate are required if one '
'is specified')
if startdate and enddate:
data['startdate'] = startdate
data['enddate'] = enddate
path = self._api.join(BASE_URL, 'market', 'news', 'search')
return self._api.post(path, data=data, **kwargs)
def article(self, article_id):
r = self._article(article_id=article_id)
return r['response']['article']
def search(self, keywords=None, symbols=None, maxhits=None, startdate=None,
enddate=None):
r = self._search(keywords=keywords, symbols=symbols, maxhits=maxhits,
startdate=startdate, enddate=enddate)
return r['response']['articles']['article']
class Options(object):
def __init__(self, api, market):
self._api = api
self._market = market
symbol = staticmethod(utils.option_symbol)
symbols = staticmethod(utils.option_symbols)
decode = staticmethod(utils.parse_option_symbol)
def _expirations(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'expirations')
return self._api.get(path, params=params, **kwargs)
def _search(self, symbol, query, fields=None, query_is_prepared=False,
**kwargs):
if not isinstance(query, OptionQuery) and not query_is_prepared:
query = OptionQuery(query)
data = {'symbol': symbol, 'query': query}
if fields is not None:
data['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'options', 'search')
return self._api.post(path, data=data, **kwargs)
def _strikes(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'strikes')
return self._api.get(path, params=params, **kwargs)
def expirations(self, symbol):
r = self._expirations(symbol=symbol)
expirations = r['response']['expirationdates']['date']
return pd.to_datetime(pd.Series(expirations))
def search(self, symbol, query, fields=None):
r = self._search(symbol=symbol, query=query, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def strikes(self, symbol):
r = self._strikes(symbol=symbol)
strikes = r['response']['prices']['price']
return pd.Series(strikes, dtype=float)
def quote(self, symbol, strikes=None, expirations=None, calls=True,
puts=True, fields=None):
if strikes is None:
strikes = self.strikes(symbol)
if expirations is None:
expirations = self.expirations(symbol)
symbols = utils.option_symbols(symbol, expirations, strikes, calls,
puts)
return self._market.quotes(symbols=symbols, fields=fields)
class Market(object):
def __init__(self, api):
self._api = api
self.news = News(self._api)
self.options = Options(self._api, self)
def _clock(self, **kwargs):
path = self._api.join(BASE_URL, 'market', 'clock')
return self._api.get(path, **kwargs)
def _quotes(self, symbols, fields=None, **kwargs):
if isinstance(symbols, (list, tuple)):
symbols = ','.join(symbols)
params = {'symbols': symbols}
if fields is not None:
params['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'ext', 'quotes')
return self._api.post(path, data=params, **kwargs)
def _toplist(self, list_type='toppctgainers', **kwargs):
path = self._api.join(BASE_URL, 'market', 'toplists', list_type)
return self._api.get(path, **kwargs)
@property
def clock(self):
r = self._clock()
r = r['response']
del r['@id']
return r
def quotes(self, symbols, fields=None):
r = self._quotes(symbols=symbols, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def toplist(self, list_type='toppctgainers'):
r = self._toplist(list_type=list_type)
return _quotes_to_df(r['response']['quotes']['quote'])
# TODO(jkoelker) market/timesales
# TODO(jkoelker) market/quotes (iterator)
class TradeKing(object):
def __init__(self, consumer_key, consumer_secret,
oauth_token, oauth_secret):
self._api = API(consumer_key=consumer_key,
consumer_secret=consumer_secret,
oauth_token=oauth_token,
oauth_secret=oauth_secret)
self.market = Market(self._api)
def _accounts(self, **kwargs):
path = urllib.parse.urljoin(BASE_URL, 'accounts')
return self._api.get(path, **kwargs)
def account(self, account_id):
return Account(self._api, account_id)
# TODO(jkoelker) member/profile
# TODO(jkoelker) utility/status
# TODO(jkoelker) utility/version
# TODO(jkoelker) watchlists
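# Illustrative end-to-end usage (all credentials, account ids and symbols
# below are placeholders):
#
#     tk = TradeKing(consumer_key='...', consumer_secret='...',
#                    oauth_token='...', oauth_secret='...')
#     tk.market.clock                      # market clock/status dict
#     tk.market.quotes(['AAPL', 'MSFT'])   # DataFrame indexed by symbol
#     tk.market.options.expirations('AAPL')
#     tk.account('12345678').holdings      # holdings for a given account id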
|
mit
|
warmspringwinds/scikit-image
|
skimage/io/tests/test_mpl_imshow.py
|
1
|
2822
|
from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
im64 = im8.astype(np.uint64)
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
# check that no colorbar was created
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_low_dynamic_range():
with expected_warnings(["Low image dynamic range"]):
ax_im = io.imshow(im_lo)
assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
# check that a colorbar was created
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_outside_standard_range():
plt.figure()
with expected_warnings(["out of standard range"]):
ax_im = io.imshow(im_hi)
assert ax_im.get_clim() == (im_hi.min(), im_hi.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_nonstandard_type():
plt.figure()
with expected_warnings(["Non-standard image type"]):
ax_im = io.imshow(im64)
assert ax_im.get_clim() == (im64.min(), im64.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_signed_image():
plt.figure()
im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
ax_im = io.imshow(im_signed)
assert ax_im.get_clim() == (-0.5, 0.5)
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
if __name__ == '__main__':
np.testing.run_module_suite()
|
bsd-3-clause
|
SmokinCaterpillar/pypet
|
pypet/tests/profiling/speed_analysis/avg_runtima_as_function_of_length.py
|
2
|
2266
|
__author__ = 'robert'
from pypet import Environment, Trajectory
from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def job(traj):
traj.f_ares('$set.$', 42, comment='A result')
def get_runtime(length):
filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
with Environment(filename = filename,
log_levels=50, report_progress=(0.0002, 'progress', 50),
overwrite_file=True, purge_duplicate_comments=False,
log_stdout=False,
multiproc=False, ncores=2, use_pool=True,
wrap_mode='PIPE', #freeze_input=True,
summary_tables=False, small_overview_tables=False) as env:
traj = env.v_traj
traj.par.f_apar('x', 0, 'parameter')
traj.f_explore({'x': range(length)})
# traj.v_full_copy = False
max_run = 1000
for idx in range(len(traj)):
if idx > max_run:
traj.f_get_run_information(idx, copy=False)['completed'] = 1
start = time.time()
env.f_run(job)
end = time.time()
# dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
total = end - start
return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj)
def main():
#lengths = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
lengths = [100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
runtimes = [get_runtime(x) for x in lengths]
avg_runtimes = [x[0] for x in runtimes]
summed_runtime = [x[1] for x in runtimes]
plt.subplot(2, 1, 1)
plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2)
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Average Runtime per single run')
plt.grid()
plt.subplot(2, 1, 2)
plt.loglog(lengths, summed_runtime, linewidth=2)
plt.grid()
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Total runtime of experiment')
plt.savefig('avg_runtime_as_func_of_lenght_1000_single_core')
plt.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
rohit21122012/DCASE2013
|
runs/2013/dnn_layerwise/bs1024/dnn_5layer/src/dataset.py
|
37
|
78389
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import csv
import urllib2
import socket
import locale
import zipfile
import tarfile
import numpy
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
The specific dataset classes inherit from this class, and only the needed methods are reimplemented.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if it does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
except (urllib2.URLError, socket.timeout), e:
try:
fo.close()
except:
raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
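# Illustrative workflow for the concrete dataset classes defined below,
# using only the base-class API above (paths are examples):
#
#     dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
#     for fold in dataset.folds(mode='folds'):
#         train_items = dataset.train(fold)   # dicts with 'file', 'scene_label', ...
#         test_items = dataset.test(fold)     # dicts with 'file'
#
# fetch() only downloads and extracts packages when the stored file-list hash
# no longer matches the files currently on disk.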
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
for fold in xrange(1, self.evaluation_folds + 1):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name = 'CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
if tag == 'b':
print file
if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds + 1):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
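# A minimal usage sketch (assuming the Dataset base class defined earlier in
# this module handles the download/extract steps that invoke this hook):
#
#   dataset = DCASE2013_Scene_DevelopmentSet(data_path='data')
#   dataset.on_after_extract()  # writes the meta file and foldN_* setup files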
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sounds are used for training
# and polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
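# The meta file written above stores one event per row, tab-delimited as
# [relative audio path, scene_label, onset, offset, event_label, id], where the
# onset/offset come from the annotation files and id is 'i' for isolated sounds
# and 'm' for polyphonic mixtures.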
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sounds are used for training
# and polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
|
mit
|
chanceraine/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/axes.py
|
69
|
259904
|
from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format strings.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
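# Illustrative examples of the parsing above (a sketch, not part of the
# original module): _process_plot_format('r--') returns ('--', 'None', 'r'),
# i.e. a dashed red line with no marker, while _process_plot_format('ko')
# returns ('None', 'o', 'k'), i.e. black circle markers with no line.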
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
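# Hypothetical usage sketch: set_default_color_cycle(['r', 'g', 'b']) makes
# every Axes created afterwards cycle red -> green -> blue for new lines.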
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
# in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
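# With the stock defaultColors above, repeated calls to _get_next_cycle_color()
# yield 'b', 'g', 'r', 'c', 'm', 'y', 'k' and then wrap around (count % Ncolors).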
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
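# How the generator above groups arguments (illustrative): a call like
# plot(t1, s1, 'ko', t2, s2) is consumed as the triple (t1, s1, 'ko') followed
# by the pair (t2, s2); a lone y, or a (y, fmt) / (x, y) pair, is handled by
# the one- and two-argument branches.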
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
:class:`~matplotlib.figure.Figure` coords
*sharex* a :class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* a :class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-separable axes, e.g. polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.axes.Axes` figure
accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
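# Illustrative call: ax.set_aspect('equal', adjustable='datalim') requests the
# same data-to-plot scaling on both axes by adjusting xlim/ylim rather than
# resizing the axes box.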
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
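# Worked example (hypothetical bounds): with get_xbound() == (0.0, 10.0) and
# get_ybound() == (0.0, 5.0), get_data_ratio() returns 5.0 / 10.0 == 0.5.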
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
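# Usage sketch for the convenience method above (values are illustrative):
#   ax.axis('equal')                  # same data scaling on x and y
#   ax.axis([0.0, 10.0, -1.0, 1.0])   # set xmin, xmax, ymin, ymax directly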
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
        Get whether the axis ticks and gridlines are drawn below most artists
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
        *kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
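        For example, to request scientific notation on the y-axis for
        numbers outside 10**-2 .. 10**3 (an illustrative call, assuming
        an Axes instance ``ax``)::
            ax.ticklabel_format(style='sci', scilimits=(-2, 3), axis='y')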
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
                raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
            raise ValueError("%r is not a valid style value" % style)
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
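        For example (an illustrative call; ``ax`` is assumed to be this
        Axes)::
            ax.set_xbound(0, 10)   # any existing x-axis inversion is kept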
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
          *xmin*: scalar
            the min of the xlim
          *xmax*: scalar
            the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
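        For example (an illustrative call; *basex* is accepted by the
        'log' scale)::
            ax.set_xscale('log', basex=2)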
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
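        For example (illustrative)::
            ax.set_xticks([0.0, 0.5, 1.0])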
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
        'Get the x major tick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
        'Get the x minor tick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
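        For example (illustrative; *rotation* is a Text property)::
            ax.set_xticklabels(['one', 'two', 'three'], rotation=45)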
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
        self._process_unit_info(ydata=(ymin, ymax))
        if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
        'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
        'Get the y major tick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
        'Get the y minor tick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
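        For example, after plotting datetime data (an illustrative sketch;
        ``dates`` and ``values`` are hypothetical sequences)::
            ax.plot(dates, values)
            ax.xaxis_date()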
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
        formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
        Return *x* formatted as a string. This function will use the
        attribute :attr:`fmt_xdata` if it is callable, else will fall back
        on the xaxis major formatter.
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
        Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
        return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
        c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
        Register observers to be notified when certain events occur. The
        callback functions have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
        The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
            artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how overrides and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how overrides and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how overrides and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
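        For example (an illustrative call)::
            ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
                        arrowprops=dict(facecolor='black', shrink=0.05))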
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
        if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
        * draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
        ylim settings, even if you change them, eg. with the
        :meth:`set_ylim` command.  That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
        * draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
          can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
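        For example (an illustrative call drawing three horizontal
        segments)::
            ax.hlines([1, 2, 3], 0, [1, 2, 3], colors='r')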
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
        if len(xmin)!=len(y):
            raise ValueError('xmin and y are unequal sized sequences')
        if len(xmax)!=len(y):
            raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
          vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
        if len(ymin)!=len(x):
            raise ValueError('ymin and x are unequal sized sequences')
        if len(ymax)!=len(x):
            raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
          plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
               markerfacecolor='blue', markersize=12)
        See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
        :class:`matplotlib.dates.AutoDateLocator` (if the tick
        locator is not already set to a
        :class:`matplotlib.dates.DateLocator` instance) and the
        default tick formatter to
        :class:`matplotlib.dates.AutoDateFormatter` (if the tick
        formatter is not already set to a
        :class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
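        For example (an illustrative sketch; ``dates`` and ``values`` are
        hypothetical sequences, with ``dates`` holding datetime objects or
        float date numbers)::
            ax.plot_date(dates, values, fmt='b-')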
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
        If *usevlines* is *True*, the return value is a tuple
        (*lags*, *c*, *linecol*, *b*) where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
        **Example:** the demo below shows
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
        .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
        lag. *x* and *y* are detrended by the *detrend* callable
        (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
        **Example:** the demo below shows
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
        .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
        If none of these locations are suitable, loc can be a 2-tuple
        giving x,y in axes coords, i.e.::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
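        For instance, a minimal call on an existing axes instance ``ax``
        (purely illustrative) might be::

            rects = ax.bar(left=[1, 2, 3], height=[3, 5, 2], width=0.5,
                           color='g', yerr=[0.3, 0.5, 0.2], align='center')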
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
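        # color, edgecolor and linewidth are now per-bar sequences of length
        # >= nbars, so the zip() further down pairs one spec with each bar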
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
        # proper input validation: raise ValueError instead of assert so the
        # checks are not stripped when running with python -O
        if len(left) != nbars:
            raise ValueError("argument 'left' must be %d or scalar" % nbars)
        if len(height) != nbars:
            raise ValueError("argument 'height' must be %d or scalar" % nbars)
        if len(width) != nbars:
            raise ValueError("argument 'width' must be %d or scalar" % nbars)
        if len(bottom) != nbars:
            raise ValueError("argument 'bottom' must be %d or scalar" % nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
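            # fold negative heights/widths into the lower-left corner so the
            # Rectangle below always gets non-negative extents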
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
            xmin = min([w for w in width if w != 0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
            ymin = min([h for h in height if h != 0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
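        A small illustrative example, assuming an axes instance ``ax``::

            x = np.arange(10.0)
            markerline, stemlines, baseline = ax.stem(x, np.sin(x), 'g-')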
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
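        # theta1/theta2 bound each wedge as fractions of the full circle;
        # thetam is the wedge bisector in radians and is used both to offset
        # exploded wedges and to place the labels radially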
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
        *xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
            If a scalar number, len(N) array-like object, or an Nx1
            array-like object, errorbars are drawn +/- value.
            If a 2xN array-like object, errorbars are drawn at -row1
            and +row2, i.e. the first row gives the lower errors and the
            second row the upper errors.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
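        # lolims/uplims/xlolims/xuplims are boolean masks with one entry per
        # data point; where a mask is True the flat cap is replaced below by
        # a caret marker to show a one-sided limit, and the mask is then
        # inverted so the remaining points still get ordinary caps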
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
        *whis* (default 1.5) defines the length of the whiskers as
        a function of the interquartile range.  They extend to the
        most extreme data point within ``whis*(75%-25%)`` of the box ends.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
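        As a quick illustration (assuming an axes instance ``ax``)::

            data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
            result = ax.boxplot(data, notch=0, sym='r+', vert=1, whis=1.5)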
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
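                # the notch half-width 1.57*IQR/sqrt(n) approximates a 95%
                # confidence interval for the median (McGill et al. rule)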
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
                3     a circle (*numsides* and *angle* are ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
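        # each entry is (numsides, rotation, symstyle); symstyle selects the
        # collection class used further down: 0 -> RegularPolyCollection,
        # 1 -> StarPolygonCollection, 2 -> AsteriskPolygonCollection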
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
        (the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
        *yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
        :meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
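        # Hexagonal binning uses two interleaved rectangular lattices: one at
        # integer cell centers and one offset by half a cell in x and y.  In
        # the rescaled coordinates below, each point is assigned to whichever
        # of its two candidate centers is closer under the anisotropic
        # distance d = dx**2 + 3*dy**2, which makes the effective bins
        # hexagonal.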
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
        elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
        vertices at *x*, *y* in blue::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
          an N length scalar or np array of the y data
        *y2*
          an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
            it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
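        A brief illustrative example, assuming an axes instance ``ax``::

            x = np.linspace(0, 2*np.pi, 100)
            coll = ax.fill_between(x, np.sin(x), 0.5,
                                   where=np.sin(x) > 0.5, facecolor='green')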
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
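        # contiguous_regions() splits the boolean mask into runs of True;
        # each run becomes one closed polygon whose upper edge follows y1 and
        # whose lower edge retraces y2 in reverse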
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
            *None*, default is ``normalize()``. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
            Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
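        # X and Y supplied as 1-D arrays (or single rows/columns) are
        # expanded into full 2-D grids by repetition so that they give the
        # quadrilateral corners matching C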
        if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
        if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
        Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
            * shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
        Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
        :class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
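For example, a minimal sketch (assuming *ax* is an existing axes)::
X, Y = np.meshgrid(np.arange(6), np.arange(4))
C = np.ma.masked_less(np.random.rand(3, 5), 0.2)
ax.pcolormesh(X, Y, C)        # masked entries in C are handled via cmap/norm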
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
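For example, a minimal sketch of the fastest (uniform rectangular grid)
form, assuming *ax* is an existing axes::
C = np.random.rand(30, 40)
im = ax.pcolorfast([0., 4.], [0., 3.], C)   # xr, yr ranges -> image style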
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a sharex
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
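For example, a sketch of an empirical CDF (assuming *data* is a 1-D
array of samples)::
n, bins, patches = ax.hist(data, bins=50, normed=True, cumulative=True)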
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError, 'Cannot use provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
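A minimal usage sketch (assuming *signal* is a 1-D array sampled at
1000 Hz)::
Pxx, freqs = ax.psd(signal, NFFT=512, Fs=1000, noverlap=256)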
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
step = 10*int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
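A minimal usage sketch (assuming *signal* is a real 1-D array sampled at
1000 Hz)::
Pxx, freqs, bins, im = ax.specgram(signal, NFFT=256, Fs=1000, noverlap=128)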
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
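For example, a minimal sketch of the two styles (assuming *ax* is an
existing axes)::
Z = np.random.rand(20, 20)
Z[Z < 0.9] = 0.
im = ax.spy(Z)                               # image style
marks = ax.spy(Z, marker='.', markersize=5)  # marker style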
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
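A minimal usage sketch (assuming *ax* is an existing axes)::
A = np.random.rand(8, 8)
im = ax.matshow(A)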
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
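For example, a sketch assuming *fig* is an existing
:class:`~matplotlib.figure.Figure`::
ax = fig.add_subplot(223)     # equivalent to fig.add_subplot(2, 2, 3)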
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
|
agpl-3.0
|
AlexanderFabisch/scikit-learn
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
return np.random.randint(-50, 50, size=size) \
    + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
|
bsd-3-clause
|
luo66/scikit-learn
|
examples/svm/plot_rbf_parameters.py
|
132
|
8096
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
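# A sketch of the effect: with vmin=0.2, midpoint=0.92 and vmax=1.0 (vmax is
# autoscaled from the data in the heatmap below), a score of 0.92 is mapped to
# 0.5, the middle of the colormap, so small differences among the best scores
# stay visible:
#   MidpointNormalize(vmin=0.2, midpoint=0.92, vmax=1.0)(0.92)  # -> 0.5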
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
bsd-3-clause
|
leal26/pyXFOIL
|
examples/2D/flight_conditions/convergence_study.py
|
2
|
4724
|
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import interpolate
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import aeropy.xfoil_module as xf
from aeropy.aero_module import Reynolds
from aeropy.geometry.airfoil import CST, create_x
import scipy.io
from scipy.integrate import simps
from weather.scraper.flight_conditions import properties, Airframe
def expected(data, airFrame):
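# Monte Carlo estimate of the expected lift-to-drag ratio: each sampled
# (angle of attack, velocity) point is weighted by the flight-condition
# probability density from airFrame.pdf and normalized by the summed density.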
# data = data[data[:,0].argsort()]
alpha, velocity, lift_to_drag = data
pdf = airFrame.pdf.score_samples(np.vstack([alpha.ravel(), velocity.ravel()]).T)
pdf = np.exp(pdf.reshape(lift_to_drag.shape))
expected_value = 0
numerator_list = []
denominator_list = []
N = len(alpha.ravel())
# V = 12*45
V = 1
total_pdf = sum(pdf)
for i in range(len(lift_to_drag)):
if lift_to_drag[i] is not None:
expected_value += (V/total_pdf)*pdf[i]*lift_to_drag[i]
return(expected_value)
# Define object
C172 = pickle.load(open('C172.p', 'rb'))
airfoil_database = pickle.load(open('../2D/fitting.p', 'rb'))
Al_database = np.array(airfoil_database['Al'])
Au_database = np.array(airfoil_database['Au'])
dl_database = np.array(airfoil_database['dl'])
du_database = np.array(airfoil_database['du'])
airfoil = 'from_database_3'
altitude = 10000
chord = 1
grids = [160, 180, 200]
data = {'Mean':[], 'STD':[], 'grids': grids}
j = 1105
print(j, airfoil_database['names'][j])
for n in grids:
expected_data = []
for k in range(7):
# Plot histograms
parameters = []
for i in range(n):
sample = C172.pdf.sample(1)
while sample[0][0]<0 or sample[0][0]>12:
sample = C172.pdf.sample(1)
parameters.append(sample)
C172.samples = np.array(parameters)
[AOAs, velocities] = C172.samples.T
AOAs = AOAs[0]
velocities = velocities[0]
# data[-1]['V'].append(velocities)
# data[-1]['AOA'].append(AOAs)
Au = Au_database[j, :]
Al = Al_database[j, :]
x = create_x(1., distribution = 'linear')
y = CST(x, chord, deltasz=[du_database[j], dl_database[j]],
Al=Al, Au=Au)
xf.create_input(x, y['u'], y['l'], airfoil, different_x_upper_lower = False)
LDs = []
for i in range(len(AOAs)):
AOA = AOAs[i]
V = velocities[i]
try:
Data = xf.find_coefficients(airfoil, AOA,
Reynolds=Reynolds(10000, V, chord),
iteration=100, NACA=False,
delete=True)
lift_drag_ratio = Data['CL']/Data['CD']
except:
lift_drag_ratio = None
increment = 0.1
conv_counter = 0
while lift_drag_ratio is None and conv_counter <2:
print(increment)
Data_f = xf.find_coefficients(airfoil, AOA*(1+increment),
Reynolds=Reynolds(10000, V*(1+increment), chord),
iteration=100, NACA=False,
delete=True)
Data_b = xf.find_coefficients(airfoil, AOA*(1-increment),
Reynolds=Reynolds(10000, V*(1-increment), chord),
iteration=100, NACA=False,
delete=True)
print(Data_f['CL'], Data_f['CD'])
print(Data_b['CL'], Data_b['CD'])
try:
lift_drag_ratio = .5*(Data_f['CL']/Data_f['CD'] +
Data_b['CL']/Data_b['CD'])
except(TypeError):
increment += 0.1
conv_counter += 1
print(i, AOA, V, lift_drag_ratio)
LDs.append(lift_drag_ratio)
data_i = np.array([AOAs.flatten(), velocities.flatten(),
LDs])
expected_data.append(expected(data_i, C172))
data['Mean'].append(np.mean(np.array(expected_data)))
data['STD'].append(np.std(np.array(expected_data)))
print(data)
df = pd.DataFrame(data)
df.to_pickle('convergence.p')
plt.figure()
plt.errorbar(grids, data['Mean'], data['STD'], linestyle='None', capsize = 2, marker = ".")
plt.xlabel('Sample size')
plt.ylabel('Expected value')
plt.show()
|
mit
|
fmaguire/BayeHem
|
Spearmint/spearmint/tests/models/in_progress/gp.py
|
2
|
15806
|
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import sys
import os
import ast
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
from spearmint.models import GP
from spearmint.utils.param import Param as Hyperparameter
from spearmint.kernels import Matern, Noise, Scale, SumKernel, TransformKernel
from spearmint.sampling.slice_sampler import SliceSampler
from spearmint.utils import priors
from spearmint.transformations import BetaWarp, IgnoreDims, Linear, Normalization, Transformer
import spearmint.utils.param as param_util
import spearmint.sampling.sampler as sampler
import copy
from collections import defaultdict
#TODO: The tests below should be converted into proper nosetests.
class DiagnosticGP(GP):
# The above functions sample the latent function. This is the observation model
# (i.i.d. Gaussian noise)
def observation_model(self, y):
if self.noiseless:
return y
elif type(y) == float or y.size==1:
return y + npr.randn() * np.sqrt(self.noise.value)
else:
return np.squeeze(y + npr.randn(*y.shape) * np.sqrt(self.noise.value))
# There seem to be 2 tests called the Geweke test. One is the Geweke convergence test
# and one is the Geweke correctness test. This is the correctness test. It is described at
# https://hips.seas.harvard.edu/blog/2013/06/10/testing-mcmc-code-part-2-integration-tests/
# This test uses an arbitrary statistic of the data (outputs). Here we use the mean.
def geweke_correctness_test(self):
print 'Initiating Geweke Correctness test'
# Note: the horseshoe prior on the noise will make the line slightly not straight
# because we don't have the actual log pdf
import matplotlib.pyplot as plt
# First, check that all priors and models can be sampled from
for param in self.hypers:
if not hasattr(param.prior, 'sample'):
print 'Prior of param %s cannot be sampled from. Cannot perform the Geweke correctness test.' % param.name
return
n = 10000 # number of samples # n = self.mcmc_iters
statistic_of_interest = np.mean
true_data = copy.copy(self.data) # reset this at the end
# Case A:
# 1) Draw new hypers from priors
# 2) Draw new data given hypers (**NOT** given hypers and data !!!!)
caseA = np.zeros(n)
for i in xrange(n):
if i % 1000 == 0:
print 'Geweke Part A Sample %d/%d' % (i,n)
for param in self.hypers:
param.sample_from_prior()
latent_y = self.sample_from_prior_given_hypers(self.data) # only inputs used
# fants = latent_y
fants = self.observation_model(latent_y)
# self.noise.print_diagnostics()
# print fants
caseA[i] = statistic_of_interest(fants)
# Case B:
# 1) Resample all hypers one step given data
# 2) Resample data given hypers
# repeat a bunch of times
caseB = np.zeros(n)
for i in xrange(n):
if i % 1000 == 0:
print 'Geweke Part B Sample %d/%d' % (i,n)
# Take MCMC step on theta given data
self.sampler.generate_sample() # data['inputs'] and data['values'] used
# Resample data
latent_y = self.sample_from_prior_given_hypers(self.data) # only data['inputs'] used
# self.data['values'] = latent_y
self.data['values'] = self.observation_model(latent_y) # add noise
# self.noise.print_diagnostics()
# print self.data['values']
caseB[i] = statistic_of_interest(self.data['values'])
print np.mean(caseA)
print np.std(caseA)
print np.mean(caseB)
print np.std(caseB)
# Then, sort the sets A and B.
caseA = np.sort(caseA)
caseB = np.sort(caseB)
# Then for each a in A, take the fraction of B smaller than it.
yAxis = np.zeros(n)
for i in xrange(n):
yAxis[i] = np.sum(caseB < caseA[i]) / float(n)
xAxis = np.arange(n)/float(n)
# Plot fractional index of a vs this fraction.
# Repeat for all a in A so number of points on graph is |A| ( = |B| )
if not os.path.isdir('diagnostics'):
os.mkdir('diagnostics')
if not os.path.isdir('diagnostics/correctness'):
os.mkdir('diagnostics/correctness')
plt.figure(1)
plt.clf()
plt.plot(xAxis, yAxis, 'b')
plt.plot(xAxis, xAxis, '--r')
plt.title('Geweke test P-P plot with %d samples' % n)
plt.savefig('diagnostics/correctness/GewekeCorrectness_%d_samples.pdf' % n)
self.data = true_data
def test_gp():
x = np.linspace(-5,5,10)[:,None] # 10 data points, 1-D
xtest = np.linspace(-6,6,200)[:,None]
y = np.sin(x.flatten()) + np.sqrt(1e-3)*np.random.randn(x.shape[0])
ytest = np.sin(xtest.flatten())
# print 'Inputs'
# print x
# print 'Outputs'
# print y
data = {'inputs':x, 'values':y}
pred = {'inputs':xtest, 'values':ytest}
options = {'likelihood':'GAUSSIAN',
'mcmc-iters':500,
'burn-in':500,
'verbose':False,
'mcmc-diagnostics':True,
'thinning':0,
'priors': {'mean':{'distribution':'Gaussian', 'parameters':{'mu':0.0, 'sigma':1.0}},
'noise':{'distribution':'Lognormal', 'parameters':{'scale':1.0}},
'amp2' :{'distribution':'Lognormal', 'parameters':{'scale':1.0}}
}
}
gp = GP(x.shape[1], **options)
gp.fit(data)
func_m, func_v = gp.predict(pred, full_cov=False, compute_grad=False)
# func_m, func_v, grad_m, grad_v = gp.predict(pred, full_cov=False, compute_grad=True)
# print np.hstack((func_m[:,None], ytest[:,None]))
if __name__ == '__main__':
test_gp()
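# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original Spearmint file). The Geweke
# correctness check described above compares two ways of sampling the joint
# distribution of hyperparameters and data; the toy model below (Gaussian
# prior on a mean, Gaussian likelihood) keeps both samplers exact, so the
# resulting P-P curve should lie on the diagonal. All names here are
# hypothetical and only illustrate the idea; numpy is already imported as np
# at the top of this file.
def _geweke_toy_check(n=5000, n_data=3, prior_sigma=1.0, noise_sigma=0.5, seed=0):
    rng = np.random.RandomState(seed)
    # Case A: draw the parameter from its prior, then data given the parameter.
    mu_a = prior_sigma * rng.randn(n)
    stat_a = np.sort(np.mean(mu_a[:, None] + noise_sigma * rng.randn(n, n_data), axis=1))
    # Case B: alternate a fresh data draw given the current parameter with an
    # exact posterior draw of the parameter given that data.
    mu, stat_b = 0.0, np.empty(n)
    for i in range(n):
        y = mu + noise_sigma * rng.randn(n_data)
        post_var = 1.0 / (1.0 / prior_sigma ** 2 + n_data / noise_sigma ** 2)
        post_mean = post_var * y.sum() / noise_sigma ** 2
        mu = post_mean + np.sqrt(post_var) * rng.randn()
        stat_b[i] = y.mean()
    stat_b = np.sort(stat_b)
    # Fraction of Case B samples below each sorted Case A sample; plotting this
    # against np.arange(n) / float(n) gives the P-P plot used above.
    return np.searchsorted(stat_b, stat_a) / float(n)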
|
apache-2.0
|
rgommers/scipy
|
scipy/integrate/_ivp/ivp.py
|
21
|
27556
|
import inspect
import numpy as np
from .bdf import BDF
from .radau import Radau
from .rk import RK23, RK45, DOP853
from .lsoda import LSODA
from scipy.optimize import OptimizeResult
from .common import EPS, OdeSolution
from .base import OdeSolver
METHODS = {'RK23': RK23,
'RK45': RK45,
'DOP853': DOP853,
'Radau': Radau,
'BDF': BDF,
'LSODA': LSODA}
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
1: "A termination event occurred."}
class OdeResult(OptimizeResult):
pass
def prepare_events(events):
"""Standardize event functions and extract is_terminal and direction."""
if callable(events):
events = (events,)
if events is not None:
is_terminal = np.empty(len(events), dtype=bool)
direction = np.empty(len(events))
for i, event in enumerate(events):
try:
is_terminal[i] = event.terminal
except AttributeError:
is_terminal[i] = False
try:
direction[i] = event.direction
except AttributeError:
direction[i] = 0
else:
is_terminal = None
direction = None
return events, is_terminal, direction
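# Illustrative note (sketch, not part of the original module): the `terminal`
# and `direction` attributes documented in `solve_ivp` are plain attributes
# attached to the event callable, and prepare_events simply reads them off, e.g.
#
#     def hit_ground(t, y):
#         return y[0]
#     hit_ground.terminal = True
#     hit_ground.direction = -1
#
#     events, is_terminal, direction = prepare_events([hit_ground, lambda t, y: y[1]])
#     # is_terminal -> array([ True, False]); direction -> array([-1.,  0.])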
def solve_event_equation(event, sol, t_old, t):
"""Solve an equation corresponding to an ODE event.
The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
ODE solver using some sort of interpolation. It is solved by
`scipy.optimize.brentq` with xtol=rtol=4*EPS.
Parameters
----------
event : callable
Function ``event(t, y)``.
sol : callable
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
t_old, t : float
Previous and new values of time. They will be used as a bracketing
interval.
Returns
-------
root : float
Found solution.
"""
from scipy.optimize import brentq
return brentq(lambda t: event(t, sol(t)), t_old, t,
xtol=4 * EPS, rtol=4 * EPS)
def handle_events(sol, events, active_events, is_terminal, t_old, t):
"""Helper function to handle events.
Parameters
----------
sol : DenseOutput
Function ``sol(t)`` which evaluates an ODE solution between `t_old`
and `t`.
events : list of callables, length n_events
Event functions with signatures ``event(t, y)``.
active_events : ndarray
Indices of events which occurred.
is_terminal : ndarray, shape (n_events,)
Which events are terminal.
t_old, t : float
Previous and new values of time.
Returns
-------
root_indices : ndarray
Indices of events which take zero between `t_old` and `t` and before
a possible termination.
roots : ndarray
Values of t at which events occurred.
terminate : bool
Whether a terminal event occurred.
"""
roots = [solve_event_equation(events[event_index], sol, t_old, t)
for event_index in active_events]
roots = np.asarray(roots)
if np.any(is_terminal[active_events]):
if t > t_old:
order = np.argsort(roots)
else:
order = np.argsort(-roots)
active_events = active_events[order]
roots = roots[order]
t = np.nonzero(is_terminal[active_events])[0][0]
active_events = active_events[:t + 1]
roots = roots[:t + 1]
terminate = True
else:
terminate = False
return active_events, roots, terminate
def find_active_events(g, g_new, direction):
"""Find which event occurred during an integration step.
Parameters
----------
g, g_new : array_like, shape (n_events,)
Values of the event functions at the current and next points.
direction : ndarray, shape (n_events,)
Event "direction" according to the definition in `solve_ivp`.
Returns
-------
active_events : ndarray
Indices of events which occurred during the step.
"""
g, g_new = np.asarray(g), np.asarray(g_new)
up = (g <= 0) & (g_new >= 0)
down = (g >= 0) & (g_new <= 0)
either = up | down
mask = (up & (direction > 0) |
down & (direction < 0) |
either & (direction == 0))
return np.nonzero(mask)[0]
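# Illustrative sketch with concrete numbers (not part of the original module).
# With g = [-1, 1] and g_new = [1, -1] both components change sign, but only
# crossings matching the requested direction are reported:
#
#     find_active_events([-1.0, 1.0], [1.0, -1.0], np.array([1.0, 1.0]))
#     # -> array([0])       only the upward crossing matches direction > 0
#     find_active_events([-1.0, 1.0], [1.0, -1.0], np.array([0.0, 0.0]))
#     # -> array([0, 1])    direction == 0 reports both crossings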
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
events=None, vectorized=False, args=None, **options):
"""Solve an initial value problem for a system of ODEs.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a 1-D independent variable (time), y(t) is an
N-D vector-valued function (state), and an N-D
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
Some of the solvers support integration in the complex domain, but note
that for stiff ODE solvers, the right-hand side must be
complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
To solve a problem in the complex domain, pass y0 with a complex data type.
Another option always available is to rewrite your problem for real and
imaginary parts separately.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar, and there are two options for the ndarray `y`:
It can either have shape (n,); then `fun` must return array_like with
shape (n,). Alternatively, it can have shape (n, k); then `fun`
must return an array_like with shape (n, k), i.e., each column
corresponds to a single column in `y`. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for stiff solvers).
t_span : 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0 : array_like, shape (n,)
Initial state. For problems in the complex domain, pass `y0` with a
complex data type (even if the initial value is purely real).
method : string or `OdeSolver`, optional
Integration method to use:
* 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
The error is controlled assuming accuracy of the fourth-order
method, but steps are taken using the fifth-order accurate
formula (local extrapolation is done). A quartic interpolation
polynomial is used for the dense output [2]_. Can be applied in
the complex domain.
* 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
is controlled assuming accuracy of the second-order method, but
steps are taken using the third-order accurate formula (local
extrapolation is done). A cubic Hermite polynomial is used for the
dense output. Can be applied in the complex domain.
* 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
Python implementation of the "DOP853" algorithm originally
written in Fortran [14]_. A 7-th order interpolation polynomial
accurate to 7-th order is used for the dense output.
Can be applied in the complex domain.
* 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
order 5 [4]_. The error is controlled with a third-order accurate
embedded formula. A cubic polynomial which satisfies the
collocation conditions is used for the dense output.
* 'BDF': Implicit multi-step variable-order (1 to 5) method based
on a backward differentiation formula for the derivative
approximation [5]_. The implementation follows the one described
in [6]_. A quasi-constant step scheme is used and accuracy is
enhanced using the NDF modification. Can be applied in the
complex domain.
* 'LSODA': Adams/BDF method with automatic stiffness detection and
switching [7]_, [8]_. This is a wrapper of the Fortran solver
from ODEPACK.
Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
for non-stiff problems and implicit methods ('Radau', 'BDF') for
stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
for solving with high precision (low values of `rtol` and `atol`).
If not sure, first try to run 'RK45'. If it makes unusually many
iterations, diverges, or fails, your problem is likely to be stiff and
you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
choice, but it might be somewhat less convenient to work with as it
wraps old Fortran code.
You can also pass an arbitrary class derived from `OdeSolver` which
implements the solver.
t_eval : array_like or None, optional
Times at which to store the computed solution, must be sorted and lie
within `t_span`. If None (default), use points selected by the solver.
dense_output : bool, optional
Whether to compute a continuous solution. Default is False.
events : callable, or list of callables, optional
Events to track. If None (default), no events will be tracked.
Each event occurs at the zeros of a continuous function of time and
state. Each function must have the signature ``event(t, y)`` and return
a float. The solver will find an accurate value of `t` at which
``event(t, y(t)) = 0`` using a root-finding algorithm. By default, all
zeros will be found. The solver looks for a sign change over each step,
so if multiple zero crossings occur within one step, events may be
missed. Additionally each `event` function might have the following
attributes:
terminal: bool, optional
Whether to terminate integration if this event occurs.
Implicitly False if not assigned.
direction: float, optional
Direction of a zero crossing. If `direction` is positive,
`event` will only trigger when going from negative to positive,
and vice versa if `direction` is negative. If 0, then either
direction will trigger event. Implicitly 0 if not assigned.
You can assign attributes like ``event.terminal = True`` to any
function in Python.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
args : tuple, optional
Additional arguments to pass to the user-defined functions. If given,
the additional arguments are passed to all user-defined functions.
So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
then `jac` (if given) and any event functions must have the same
signature, and `args` must be a tuple of length 3.
options
Options passed to a chosen solver. All options available for already
implemented solvers are listed below.
first_step : float or None, optional
Initial step size. Default is `None` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float or array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : array_like, sparse_matrix, callable or None, optional
Jacobian matrix of the right-hand side of the system with respect
to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
Jacobian matrix has shape (n, n) and its element (i, j) is equal to
``d f_i / d y_j``. There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant. Not supported by 'LSODA'.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)``, as necessary.
For 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : array_like, sparse matrix or None, optional
Defines a sparsity structure of the Jacobian matrix for a finite-
difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few
non-zero elements in *each* row, providing the sparsity structure
will greatly speed up the computations [10]_. A zero entry means that
a corresponding element in the Jacobian is always zero. If None
(default), the Jacobian is assumed to be dense.
Not supported by 'LSODA', see `lband` and `uband` instead.
lband, uband : int or None, optional
Parameters defining the bandwidth of the Jacobian for the 'LSODA'
method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
Default is None. Setting these requires your jac routine to return the
Jacobian in the packed format: the returned array must have ``n``
columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
The same format is used in `scipy.linalg.solve_banded` (check for an
illustration). These parameters can be also used with ``jac=None`` to
reduce the number of Jacobian elements estimated by finite differences.
min_step : float, optional
The minimum allowed step size for 'LSODA' method.
By default `min_step` is zero.
Returns
-------
Bunch object with the following fields defined:
t : ndarray, shape (n_points,)
Time points.
y : ndarray, shape (n, n_points)
Values of the solution at `t`.
sol : `OdeSolution` or None
Found solution as `OdeSolution` instance; None if `dense_output` was
set to False.
t_events : list of ndarray or None
Contains for each event type a list of arrays at which an event of
that type was detected. None if `events` was None.
y_events : list of ndarray or None
For each value of `t_events`, the corresponding value of the solution.
None if `events` was None.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
status : int
Reason for algorithm termination:
* -1: Integration step failed.
* 0: The solver successfully reached the end of `t_span`.
* 1: A termination event occurred.
message : string
Human-readable description of the termination reason.
success : bool
True if the solver reached the interval end or a termination event
occurred (``status >= 0``).
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
.. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
.. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [5] `Backward Differentiation Formula
<https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
on Wikipedia.
.. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [8] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
.. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
Wikipedia.
.. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
.. [11] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
.. [12] `Lotka-Volterra equations
<https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
on Wikipedia.
.. [13] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.
.. [14] `Page with original Fortran code of DOP853
<http://www.unige.ch/~hairer/software.html>`_.
Examples
--------
Basic exponential decay showing automatically chosen time points.
>>> from scipy.integrate import solve_ivp
>>> def exponential_decay(t, y): return -0.5 * y
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
>>> print(sol.t)
[ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806
8.33328988 10. ]
>>> print(sol.y)
[[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
0.03107158 0.01350781]
[4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091
0.06214316 0.02701561]
[8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181
0.12428631 0.05403123]]
Specifying points where the solution is desired.
>>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
... t_eval=[0, 1, 2, 4, 10])
>>> print(sol.t)
[ 0 1 2 4 10]
>>> print(sol.y)
[[2. 1.21305369 0.73534021 0.27066736 0.01350938]
[4. 2.42610739 1.47068043 0.54133472 0.02701876]
[8. 4.85221478 2.94136085 1.08266944 0.05403753]]
Cannon fired upward with terminal event upon impact. The ``terminal`` and
``direction`` fields of an event are applied by monkey patching a function.
Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
at position 0 with velocity +10. Note that the integration never reaches
t=100 because the event is terminal.
>>> def upward_cannon(t, y): return [y[1], -0.5]
>>> def hit_ground(t, y): return y[0]
>>> hit_ground.terminal = True
>>> hit_ground.direction = -1
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
>>> print(sol.t_events)
[array([40.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
Use `dense_output` and `events` to find position, which is 100, at the apex
of the cannonball's trajectory. Apex is not defined as terminal, so both
apex and hit_ground are found. There is no information at t=20, so the sol
attribute is used to evaluate the solution. The sol attribute is returned
by setting ``dense_output=True``. Alternatively, the `y_events` attribute
can be used to access the solution at the time of the event.
>>> def apex(t, y): return y[1]
>>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
... events=(hit_ground, apex), dense_output=True)
>>> print(sol.t_events)
[array([40.]), array([20.])]
>>> print(sol.t)
[0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
>>> print(sol.sol(sol.t_events[1][0]))
[100. 0.]
>>> print(sol.y_events)
[array([[-5.68434189e-14, -1.00000000e+01]]), array([[1.00000000e+02, 1.77635684e-15]])]
As an example of a system with additional parameters, we'll implement
the Lotka-Volterra equations [12]_.
>>> def lotkavolterra(t, z, a, b, c, d):
... x, y = z
... return [a*x - b*x*y, -c*y + d*x*y]
...
We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
argument.
>>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
... dense_output=True)
Compute a dense solution and plot it.
>>> t = np.linspace(0, 15, 300)
>>> z = sol.sol(t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, z.T)
>>> plt.xlabel('t')
>>> plt.legend(['x', 'y'], shadow=True)
>>> plt.title('Lotka-Volterra System')
>>> plt.show()
"""
if method not in METHODS and not (
inspect.isclass(method) and issubclass(method, OdeSolver)):
raise ValueError("`method` must be one of {} or OdeSolver class."
.format(METHODS))
t0, tf = float(t_span[0]), float(t_span[1])
if args is not None:
# Wrap the user's fun (and jac, if given) in lambdas to hide the
# additional parameters. Pass in the original fun as a keyword
# argument to keep it in the scope of the lambda.
fun = lambda t, x, fun=fun: fun(t, x, *args)
jac = options.get('jac')
if callable(jac):
options['jac'] = lambda t, x: jac(t, x, *args)
if t_eval is not None:
t_eval = np.asarray(t_eval)
if t_eval.ndim != 1:
raise ValueError("`t_eval` must be 1-dimensional.")
if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
raise ValueError("Values in `t_eval` are not within `t_span`.")
d = np.diff(t_eval)
if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
raise ValueError("Values in `t_eval` are not properly sorted.")
if tf > t0:
t_eval_i = 0
else:
# Make order of t_eval decreasing to use np.searchsorted.
t_eval = t_eval[::-1]
# This will be an upper bound for slices.
t_eval_i = t_eval.shape[0]
if method in METHODS:
method = METHODS[method]
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
if t_eval is None:
ts = [t0]
ys = [y0]
elif t_eval is not None and dense_output:
ts = []
ti = [t0]
ys = []
else:
ts = []
ys = []
interpolants = []
events, is_terminal, event_dir = prepare_events(events)
if events is not None:
if args is not None:
# Wrap user functions in lambdas to hide the additional parameters.
# The original event function is passed as a keyword argument to the
# lambda to keep the original function in scope (i.e., avoid the
# late binding closure "gotcha").
events = [lambda t, x, event=event: event(t, x, *args)
for event in events]
g = [event(t0, y0) for event in events]
t_events = [[] for _ in range(len(events))]
y_events = [[] for _ in range(len(events))]
else:
t_events = None
y_events = None
status = None
while status is None:
message = solver.step()
if solver.status == 'finished':
status = 0
elif solver.status == 'failed':
status = -1
break
t_old = solver.t_old
t = solver.t
y = solver.y
if dense_output:
sol = solver.dense_output()
interpolants.append(sol)
else:
sol = None
if events is not None:
g_new = [event(t, y) for event in events]
active_events = find_active_events(g, g_new, event_dir)
if active_events.size > 0:
if sol is None:
sol = solver.dense_output()
root_indices, roots, terminate = handle_events(
sol, events, active_events, is_terminal, t_old, t)
for e, te in zip(root_indices, roots):
t_events[e].append(te)
y_events[e].append(sol(te))
if terminate:
status = 1
t = roots[-1]
y = sol(t)
g = g_new
if t_eval is None:
ts.append(t)
ys.append(y)
else:
# The value in t_eval equal to t will be included.
if solver.direction > 0:
t_eval_i_new = np.searchsorted(t_eval, t, side='right')
t_eval_step = t_eval[t_eval_i:t_eval_i_new]
else:
t_eval_i_new = np.searchsorted(t_eval, t, side='left')
# It has to be done with two slice operations, because
# you can't slice to 0th element inclusive using backward
# slicing.
t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
if t_eval_step.size > 0:
if sol is None:
sol = solver.dense_output()
ts.append(t_eval_step)
ys.append(sol(t_eval_step))
t_eval_i = t_eval_i_new
if t_eval is not None and dense_output:
ti.append(t)
message = MESSAGES.get(status, message)
if t_events is not None:
t_events = [np.asarray(te) for te in t_events]
y_events = [np.asarray(ye) for ye in y_events]
if t_eval is None:
ts = np.array(ts)
ys = np.vstack(ys).T
else:
ts = np.hstack(ts)
ys = np.hstack(ys)
if dense_output:
if t_eval is None:
sol = OdeSolution(ts, interpolants)
else:
sol = OdeSolution(ti, interpolants)
else:
sol = None
return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
status=status, message=message, success=status >= 0)
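# Illustrative sketch (not part of the original module): the `atol` parameter
# documented above also accepts an array, which helps when components of y
# live on very different scales. The values below are arbitrary.
#
#     sol = solve_ivp(lambda t, y: [-y[0], -1e-3 * y[1]], [0, 10], [1.0, 1e-6],
#                     rtol=1e-6, atol=[1e-9, 1e-12])
#     # sol.success -> True; each component is held to its own absolute tolerance.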
|
bsd-3-clause
|
DailyActie/Surrogate-Model
|
01-codes/deap-master/examples/ga/nsga2.py
|
1
|
5078
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import numpy
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
from deap.benchmarks.tools import hypervolume
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Problem definition
# Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]
BOUND_LOW, BOUND_UP = 0.0, 1.0
# Function zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10
# BOUND_LOW, BOUND_UP = [0.0] + [-5.0]*9, [1.0] + [5.0]*9
# Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10
NDIM = 30
def uniform(low, up, size=None):
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0 / NDIM)
toolbox.register("select", tools.selNSGA2)
def main(seed=None):
random.seed(seed)
NGEN = 250
MU = 100
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats.register("avg", numpy.mean, axis=0)
# stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
return pop, logbook
if __name__ == "__main__":
# with open("pareto_front/zdt1_front.json") as optimal_front_data:
# optimal_front = json.load(optimal_front_data)
# Use 500 of the 1000 points in the json file
# optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))
pop, stats = main()
# pop.sort(key=lambda x: x.fitness.values)
# print(stats)
# print("Convergence: ", convergence(pop, optimal_front))
# print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))
# import matplotlib.pyplot as plt
# import numpy
# front = numpy.array([ind.fitness.values for ind in pop])
# optimal_front = numpy.array(optimal_front)
# plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
# plt.scatter(front[:,0], front[:,1], c="b")
# plt.axis("tight")
# plt.show()
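# Illustrative sketch (not part of the original DEAP example): one way to
# reuse `main` from another script and inspect the final non-dominated front.
# `tools.sortNondominated` is part of deap.tools; the seed value is arbitrary.
#
#     pop, logbook = main(seed=64)
#     first_front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]
#     front = numpy.array([ind.fitness.values for ind in first_front])
#     # front has shape (len(first_front), 2): the two ZDT1 objectives.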
|
mit
|
shikhardb/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
11
|
23587
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2-class dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-squares regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
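# Illustrative sketch (hypothetical helper, not part of the original test
# module): the least-squares trick used above, applied to a plain quadratic
# f(w) = 0.5 * w.dot(A).dot(w), whose Hessian-vector product is exactly
# A.dot(v), so the estimate can be checked directly.
def _approx_hess_col(grad_fn, w, vector, e=1e-3, n_steps=30):
    d_x = np.linspace(-e, e, n_steps)
    d_grad = np.array([grad_fn(w + t * vector) for t in d_x])
    d_grad -= d_grad.mean(axis=0)
    # Slope of the gradient along `vector`, estimated by least squares.
    return linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
# Example (kept commented so importing the test module stays cheap):
#     A = np.array([[2.0, 0.5], [0.5, 1.0]])
#     _approx_hess_col(lambda w: A.dot(w), np.zeros(2), np.array([1.0, 0.0]))
#     # -> approximately array([2. , 0.5]) == A.dot([1., 0.])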
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e., a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight of type dict is
# provided for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
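# The least-squares trick used above generalizes: the Hessian column along a
# direction `vec` can be estimated by regressing centred gradient differences
# on the step sizes. Illustrative, self-contained sketch (not part of the
# original test suite) on a quadratic 0.5 * w.T.dot(A).dot(w), whose Hessian
# is exactly A.
def _finite_difference_hessian_column_demo():
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4)
    A = A.T.dot(A)                      # symmetric Hessian of the quadratic
    grad = lambda w: A.dot(w)           # analytic gradient
    w = rng.rand(4)
    vec = np.zeros(4)
    vec[0] = 1.                         # probe the first Hessian column
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([grad(w + t * vec) for t in d_x])
    d_grad -= d_grad.mean(axis=0)
    approx_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    # for a quadratic the estimate matches the true column up to round-off
    assert np.allclose(approx_col, A[:, 0])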
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
|
bsd-3-clause
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_Main.py
|
1
|
4967
|
# Import smorgasbord
import sys
import os
import gc
import time
import random
#import warnings
#warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Agg')
import multiprocessing as mp
import CAAPR
import CAAPR.CAAPR_IO
import CAAPR.CAAPR_Pipeline
import pdb
# Define the function that runs the CAAPR pipeline
def Run(bands_table_path = '../CAAPR_Example/CAAPR_Band_Table.csv',
sources_table_path = '../CAAPR_Example/CAAPR_Source_Table.csv',
output_dir_path = os.path.join(os.getcwd(),'CAAPR_Output'),
temp_dir_path = os.path.join(os.getcwd(),'CAAPR_Temp'),
fit_apertures = True,
aperture_table_path = None,#'CAAPR_Aperture_Table.csv',
photom_table_path = None,
expansion_factor = 1.25,
polysub = True,
starsub = True,
do_photom = True,
extinction_corr = True,
parallel = True,
n_proc = mp.cpu_count()-2,
thumbnails = True,
debug = False,
verbose = True,
messy = False
):
# Create dictionary of kwarg values
kwargs_dict = {'sources_table_path':sources_table_path,
'bands_table_path':bands_table_path,
'output_dir_path':output_dir_path,
'temp_dir_path':temp_dir_path,
'fit_apertures':fit_apertures,
'aperture_table_path':aperture_table_path,
'photom_table_path':photom_table_path,
'expansion_factor':expansion_factor,
'polysub':polysub,
'starsub':starsub,
'do_photom':do_photom,
'extinction_corr':extinction_corr,
'parallel':parallel,
'n_proc':n_proc,
'thumbnails':thumbnails,
'debug':debug,
'verbose':verbose,
'messy':messy}
# Read in sources table, and convert into dictionary
sources_dict = CAAPR.CAAPR_IO.SourcesDictFromCSV(sources_table_path)
# Read in bands table, and convert into dictionary
bands_dict = CAAPR.CAAPR_IO.BandsDictFromCSV(bands_table_path)
# Prepare output directory
CAAPR.CAAPR_IO.OutputDirPrepare(kwargs_dict)
# Prepare temp directory, deleting any pre-existing directory at the specified location
CAAPR.CAAPR_IO.TempDirPrepare(kwargs_dict)
# Make an inviolate copy of the original band directories, to guard against overwriting when temp cutout directories are handled later
for band in bands_dict.keys():
bands_dict[band]['band_dir_inviolate'] = bands_dict[band]['band_dir']
# Record timestamp
kwargs_dict['timestamp'] = str(time.time()).replace('.','-')
# If no aperture table file provided, and aperture-fitting is requested, create and prepare CSV file to store aperture dimensions for each source
kwargs_dict = CAAPR.CAAPR_IO.ApertureTablePrepare(kwargs_dict)
# If no photometry table path provided, and photometry is requested, create and prepare CSV file to store photometry output for each source
kwargs_dict = CAAPR.CAAPR_IO.PhotomTablePrepare(kwargs_dict)
# Randomise order of source dictionary keys (to "smooth out" average system resource usage)
source_dict_keys = sources_dict.keys()
random.shuffle(source_dict_keys)
# Loop over each target source, processing in turn
time_list = [time.time()]
if verbose: print('[CAAPR] '+str(len(source_dict_keys))+' target objects to be processed.')
for source in source_dict_keys:
source_dict = sources_dict[source]
CAAPR.CAAPR_Pipeline.PipelineMain(source_dict, bands_dict, kwargs_dict)
# Estimate time until completion, and collect garbage
CAAPR.CAAPR_Pipeline.TimeEst(time_list, len(source_dict_keys), output_dir_path, source_dict, kwargs_dict)
gc.collect()
# Commence main task; generally you want to be calling CAAPR as a function, but it's useful to initiate a run this way for development and testing
if __name__ == "__main__":
# Set parameters, and run function
testing = True
parallel = False
starsub = True
fit_apertures = True
if fit_apertures==True:
aperture_table_path = None
elif fit_apertures==False:
aperture_table_path = '../DustPedia/CAAPR_Aperture_Table_Test.csv'
if testing:
Run(temp_dir_path='/home/saruman/spx7cjc/DustPedia/CAAPR_Temp',
n_proc=4,
sources_table_path='../DustPedia/CAAPR_Source_Table_Test.csv',
starsub=starsub,
fit_apertures=fit_apertures,
do_photom=False,
aperture_table_path=aperture_table_path,
parallel=parallel,
debug=False,
thumbnails=True)
# Jubilate
print('All done!')
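# Minimal usage sketch (illustrative only): the two CSV paths below are
# hypothetical placeholders, not files shipped with CAAPR. In the simplest
# case only the band and source tables are needed; aperture fitting and
# photometry then run with the default settings declared in Run().
def ExampleMinimalRun():
    Run(bands_table_path='CAAPR_Band_Table.csv',
        sources_table_path='CAAPR_Source_Table.csv',
        fit_apertures=True,
        do_photom=True,
        parallel=False,
        verbose=True)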
|
mit
|
MicheleMaris/grasp_lib
|
grasp_lib.py
|
1
|
64897
|
__DESCRIPTION__="""
grasp_lib.py V 0.6 - 3 Feb 2012 - 23 Mar 2012 (0.4) - 2012 Nov 27 (0.5) - 2013 Dec 12 -
M.Maris, M.Sandri, F.Villa
From a set of routines created by M.Sandri e F.Villa
This library allows importing a file in GRASP format and converting it into a healpix map; it also
performs interpolation of the grasp map.
In the GRASP convention theta and phi are a colatitude and a longitude, with
phi the longitude of the meridian circle, in [0,180] deg
theta the polar distance along the meridian circle, in [-180,180] deg
a point with theta < 0 denotes a point at the same polar distance abs(theta) but at longitude phi+180 deg.
The usual polar convention is
phi the longitude of the meridian half-circle, in [-180,180] deg
theta the polar distance along the meridian circle, in [0,180] deg
"""
def thetaUVphiUV2UV(thetaUV,phiUV,deg=True) :
"""converts thetaUV and phiUV into U=x0=sin(thetaUV)*cos(phiUV), V=y0=sin(thetaUV)*sin(phiUV)"""
from numpy import pi, sin, cos
if deg : return sin(pi/180.*thetaUV)*cos(pi/180.*phiUV),sin(pi/180.*thetaUV)*sin(pi/180.*phiUV)
return sin(thetaUV)*cos(phiUV),sin(thetaUV)*sin(phiUV)
def UV2thetaUVphiUV(U,V,deg=True) :
from numpy import pi, arctan2, arccos,arcsin,sin,cos,array,mod
f=(180./pi) if deg else 1.
phiUV=arctan2(V,U)
A=cos(phiUV)
B=sin(phiUV)
thetaUV=arcsin((A*U+B*V)/(A*A+B*B))
if deg : return f*thetaUV,mod(f*phiUV,360.)
return f*thetaUV,mod(phiUV,2.*pi)
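# Illustrative round-trip check for the two helpers above: going from
# (thetaUV, phiUV) to the direction cosines (U, V) and back recovers the input
# angles for thetaUV in [0, 90] deg (it relies on the degree-to-radian
# conversion in thetaUVphiUV2UV).
def _uv_roundtrip_demo():
    import numpy as np
    thetaUV, phiUV = 30., 45.
    U, V = thetaUVphiUV2UV(thetaUV, phiUV, deg=True)
    theta_back, phi_back = UV2thetaUVphiUV(U, V, deg=True)
    assert np.allclose([theta_back, phi_back], [thetaUV, phiUV])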
def phitheta2longcolat(phi_grasp,theta_grasp) :
""" converts GRASP (phi,theta) coordinates into standard (long,colat)
upon request returns a structure """
import numpy as np
_long = phi_grasp*1.
colat=theta_grasp*1.
idx = np.where(colat < 0)[0]
if len(idx) > 0 :
_long[idx]=_long[idx]+180.
colat[idx]=np.abs(colat[idx])
return _long,colat
def longcolat2phitheta(_long,colat) :
"""converts ususal polar (long,colat) coordinates into GRASP (phi,theta) returns a structure"""
import numpy as np
phi=_long*1.
theta=colat*1.
idx = np.where(phi >= 180.)[0]
if len(idx) > 0 :
phi[idx]=phi[idx]-180.
theta[idx]=-theta[idx]
return phi,theta
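# Illustrative round trip between the two conventions described in the module
# header: a GRASP point with negative theta maps onto the opposite meridian in
# the usual (long, colat) convention and back again.
def _grasp_polar_roundtrip_demo():
    import numpy as np
    phi_grasp = np.array([10., 10.])
    theta_grasp = np.array([20., -20.])
    _long, colat = phitheta2longcolat(phi_grasp, theta_grasp)
    # expected: _long = [10, 190] deg, colat = [20, 20] deg
    phi_back, theta_back = longcolat2phitheta(_long, colat)
    assert np.allclose(phi_back, phi_grasp) and np.allclose(theta_back, theta_grasp)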
def longcolat2rowcol(_long,colat,phi0,dphi,theta0,dtheta) :
""" converts (long,colat) into index of phi and of theta in the matrix """
import numpy as np
phi,theta=longcolat2phitheta(_long,colat)
return (phi-phi0)/dphi,(theta-theta0)/dtheta
def ipix2rowcol(nside,ipix,phi0,dphi,theta0,dtheta,nest=False) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
return longcolat2rowcol(_long/np.pi*180.,colat/np.pi*180.,phi0,dphi,theta0,dtheta)
def nside2ipix(nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
import numpy as np
if not Reversed : return np.arange(12*int(nside)*int(nside))
return np.arange(12*int(nside)*int(nside)-1,-1,-1)
from grid2d import *
#class GraspMapsCube :
#"a list of grasp maps is used to integrate over a grasp map in band"
#def _class_line_cube :
#def __init__(self,listNames) :
#self.Name=[]
#for k in range(len(listNames)) :
#try :
#self.plane(open(listNames[k],'r').readlines())
#self.Name.append(listNames)
#except :
#print k," impossible to read, skipped"
#self.N=len(self.Name)
#def __len__(self) :
#return self.N
#def __getitem__(self,i) :
#import numpy as np
#l=[]
#for k in range(len(self)) :
#l.append(self.plane[k][i]))
#return l
#def __init__(self,listNames) :
#import numpy as np
#self._line=-1
#self._cube=_class_line_cube(listNames)
#def __len__(self) :
#return len(self._cube)
#def _fetch_line(self) :
#self._line=+1
#return self._cube[self._line]
def components2cocross(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return Eco,Ecross"
re=[r1,r2]
im=[i1,i2]
p=[]
p.append(r1**2+i1**2)
p.append(r2**2+i2**2)
if p[0].max() > p[1].max() :
ico=0
icross=1
else :
ico=1
icross=0
Eco=re[ico]+complex(0,1.)*im[ico]
Ecross=re[icross]+complex(0,1.)*im[icross]
return Eco,Ecross
def cocross2rhclhc(Eco,Ecross) :
"given Eco,Ecross return Erhc,Elhc"
isqrt2=2.**(-0.5)
Erhc=(Eco-complex(0,1.)*Ecross)*isqrt2
Elhc=(Eco+complex(0,1.)*Ecross)*isqrt2
return Erhc,Elhc
def components2rhclhc(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return Erhc,Elhc"
Eco,Ecross=components2cocross(r1,i1,r2,i2)
return cocross2rhclhc(Eco,Ecross)
def polarization_ellipse_from_fields(Erhc,Elhc) :
"given Erhc,Elhc returns rmajor, rminor, directivity, psi_pol_rad"
import numpy as np
isqrt2=2.**(-0.5)
rmajor=abs(abs(Erhc)+abs(Elhc))*isqrt2
rminor=abs(abs(Erhc)-abs(Elhc))*isqrt2
directivity=abs(Erhc)**2+abs(Elhc)**2
aa=(Erhc/Elhc)**0.5
psi_pol=np.arctan2(aa.imag,aa.real)
return rmajor,rminor,directivity,psi_pol
def components2polarization_ellipse(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return rmajor, rminor, directivity, psi_pol_rad"
Erhc,Elhc=components2rhclhc(r1,i1,r2,i2)
return polarization_ellipse_from_fields(Erhc,Elhc)
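# Illustrative sketch of the chain above: starting from the four real GRASP
# components, the co/cross fields, the circular components and the
# polarization ellipse are derived. For this nearly pure co-polar input the
# ellipse is almost degenerate (rminor = 0). The numbers are placeholders.
def _polarization_ellipse_demo():
    import numpy as np
    r1, i1 = np.array([1.0]), np.array([0.0])     # dominant ("co") component
    r2, i2 = np.array([0.1]), np.array([0.0])     # weaker ("cross") component
    Eco, Ecross = components2cocross(r1, i1, r2, i2)
    Erhc, Elhc = cocross2rhclhc(Eco, Ecross)
    rmajor, rminor, directivity, psi_pol = polarization_ellipse_from_fields(Erhc, Elhc)
    print('rmajor=%g rminor=%g directivity=%g psi_pol=%g' %
          (rmajor[0], rminor[0], directivity[0], psi_pol[0]))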
class GraspMap(MapGrid) :
def __init__(self,inputfile,skiplines,CounterPhi=[1e6,1.],silent=False,useCounterPhi=False,closeColumn=False,Pickle=False,periodicColumn=False,badConversionValue=0.) :
"""badConversionValue = Value to replace samples with problems in converting strings to numbers"""
MapGrid.__init__(self)
self._init_failed = True
if Pickle :
self.load(inputfile)
self._init_failed = False
return
self.info['graps_file']=inputfile.strip()
self.info['projection']='GRASP-CUT'
self.info['ReferenceDocument']="LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS\nM. Sandri\nPL-LFI-PST-TN-044, 1.0,July 2003"
self.info['numbad']=-1
if inputfile.strip() == '' :
return
self.get_cuts(inputfile,skiplines,badConversionValue=badConversionValue)#,CounterPhi=CounterPhi,silent=silent,useCounterPhi=useCounterPhi)
if periodicColumn :
self.set_col_periodic()
if closeColumn :
print 'Closing rows at right'
for k in self.M.keys() :
if k!='_row_values' and k!='_col_values' and k!='_row_index' and k!='_col_index' :
for r in range(self.R['n']) :
self.M[k][r][-1]=self.M[k][self.R['n']-1-r][0]
#if closeColumn : self.right_close_col()
def get_cuts(self,inputfile,skiplines,CounterPhi=[1e6,1.],silent=False,useCounterPhi=False,badConversionValue=0.) :
""" get_cuts
this program get the cuts file
inputfile = name of the input file
the output is a structure with an entry for each cut in phi
COUNTER_PHI v.z. HEADER_PHI
By default the field PHI in each cut is the one declared in the
header of the block of the input grasp file.
The value is also returned as HEADER_PHI in the structure
There are some cases in which the header is bad formatted and
the PHI is not reliable.
To solve this problem GET_CUTS provides an internal PHI calculator
assuming PHI increments on constant steps, the value from
the calculator is in COUNTER_PHI
The counter is tuned by using the keyword
CounterPhi = [phi0,step] (default [1d6,1d0])
so the default is simply a counter of the number of cuts.
The value 1e6. as first value is to assure the COUNTER_PHI is not
confused with an angle.
To make COUNTER_PHI as PHI it is sufficient to add the keyword
/useCounterPhi
so that the PHI will be forced to be COUNTER_PHI instead of
HEADER_PHI.
At last the tag PHI_TYPE in the structure specifies wether PHI
comes from the HEADER or from the COUNTER
In self.info['numbad'] is the number of lines which can not be properly decoded into float,
they are marked bad (1) in the flag_bad map.
badConversionValue = Value to replace samples with problems in converting strings to numbers
"""
import sys
import numpy as np
import copy
CounterPhi0=CounterPhi[0]*1.
CounterDeltaPhi=CounterPhi[1]*1.
deltaphi=1.
phi0=0.
header='header'
fileinput='fileinput'
thetai=0.
dtheta=0.
ntheta=1
phi=0.
k1=1
k2=1
k3=1
comp1r=0.
comp1i=0.
comp2r=0.
comp2i=0.
#
#********************************
#
self.clean()
self.info['hdr']={'file':inputfile}
self.info['wrong_lines']=[]
print "Reading ",inputfile
try :
h=open(inputfile,'r').readlines()
self.mapname=inputfile
self.info['inputfile']=inputfile
except :
print "File %s not found"%inputfile
return
# removes the new line
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0]
# skips a given number of lines
self.info['hdr']['skipped']=[]
self.info['hdr']['skiplines']=skiplines*1
if skiplines > 0 :
for line in h[0:skiplines] :
self.info['hdr']['skipped'].append(line.split('\n')[0])
h=h[skiplines:]
# skips all the lines until it reaches an header
notHeader = True
icount=-1
while notHeader :
icount+=1
ll=h[icount].split('\n')[0].strip().split()
try :
lla = np.array(ll,dtype=float)
notHeader = False
except :
notHeader = True
if not notHeader :
if len(ll) != 7 :
notHeader = True
icount-=1
if icount > 0 :
for k in h[0:icount] :
self.info['hdr']['skipped'].append(k)
self.info['hdr']['skiplines']+=1
h=h[icount:]
# the second line of the first block gives the number of lines per block
currentline=1
ll=h[currentline].split('\n')[0].strip().split()
try :
thetai = float(ll[0])
dtheta = float(ll[1])
ntheta = int(ll[2])
header_phi = float(ll[3])
k1 = int(ll[4])
k2 = int(ll[5])
k3 = int(ll[6])
except :
return h[currentline-1],'level 1',currentline,h
self.info['nlines']=len(h)
self.info['blocksize']=ntheta+2
self.info['nblocks'] = len(h)/(ntheta+2)
nblocks = self.info['nblocks']
self.info['thetai']=np.zeros(nblocks)
self.info['dtheta']=np.zeros(nblocks)
self.info['ntheta']=np.zeros(nblocks,dtype='int')
self.info['phi']=np.zeros(nblocks)
self.info['k1']=np.zeros(nblocks,dtype='int')
self.info['k2']=np.zeros(nblocks,dtype='int')
self.info['k3']=np.zeros(nblocks,dtype='int')
self.info['line']=np.zeros(nblocks,dtype='string')
self.info['fail']=np.zeros(nblocks,dtype='int')+1
self.info['iline']=np.zeros(nblocks,dtype='int')
if (ntheta+2)*self.info['nblocks']-len(h) != 0 :
print "Error: too much or too few lines to form the required number of blocks"
print "Nblocks : ",self.info['nblocks']
print "lines : ",len(h)
print "lines per block : ",ntheta+2
print "lines in blocks : ",(ntheta+2)*nblocks
print "residual lines : ",(ntheta+2)*nblocks-len(h)
return None
print self.info['nblocks']," x ",ntheta," elements"
# decomposes all the headers
for i_block in range(nblocks) :
ii = i_block*self.info['blocksize']
self.info['iline'][i_block]=ii*1
self.info['line'][i_block]=h[ii]+''
ll=h[ii+1].split('\n')[0].strip().split()
try :
self.info['thetai'][i_block] = float(ll[0])
self.info['dtheta'][i_block] = float(ll[1])
self.info['ntheta'][i_block] = int(ll[2])
self.info['phi'][i_block] = float(ll[3])
self.info['k1'][i_block] = int(ll[4])
self.info['k2'][i_block] = int(ll[5])
self.info['k3'][i_block] = int(ll[6])
self.info['fail'][i_block] = 0
except :
print "Fail to decode block %d, line %d\n'%s'\n"%(i_block,ii,h[ii+1])
if self.info['fail'].sum() > 0 :
print "fail to decode blocks"
return
# sets the phi along the x axis of the grid i.e. the columns
self.set_col_scale('phi','deg',self.info['phi'])
self.dphi=self.C['delta']
self.phi0=self.C['min']
# sets the theta along the y axis of the grid i.e. the rows
self.set_row_scale('theta','deg',np.arange(self.info['ntheta'][0])*self.info['dtheta'][0]+self.info['thetai'][0])
self.dtheta=self.R['delta']
self.theta0=self.R['min']
# initialize private components used for debugging
self.newmap('_line_index',dtype='int')
# initializes the five component matrices
self.newmap('r1')
self.newmap('i1')
self.newmap('r2')
self.newmap('i2')
self.newmap('power')
self.newmap('flag_bad',dtype='int')
#
# fill the component matrices
# each block is for a given phi, i.e. a given Column
# each value is for a given theta, i,e, a given Row
self.newmap('phi')
self.newmap('theta')
self.info['numbad']=0
for i_block in range(nblocks) :
ii = i_block*self.info['blocksize']+2
for i_raw in range(self.R['n']) :
iline = ii+i_raw
self.M['phi'][i_raw,i_block]=self.C['v'][i_block]*1
self.M['theta'][i_raw,i_block]=self.R['v'][i_raw]*1
self.M['_row_values'][i_raw,i_block]=self.R['v'][i_raw]*1
self.M['_col_values'][i_raw,i_block]=self.C['v'][i_block]*1
self.M['_row_index'][i_raw,i_block]=i_raw*1
self.M['_col_index'][i_raw,i_block]=i_block*1
self.M['_line_index'][i_raw,i_block]=iline*1
ll=h[iline].split('\n')[0].strip().split()
lla=np.zeros(4)
nbad=0
for icol in range(4) :
try :
lla[icol]=np.array(ll[icol],dtype=float)
except :
lla[icol]=badConversionValue
nbad+=1
if nbad == 0 :
self.M['flag_bad'][i_raw,i_block]=0
else :
self.info['wrong_lines'].append("skiplines %d, line %d, row %d, block %d\n%s"%(self.info['hdr']['skiplines'],iline,i_raw,i_block,h[iline]))
print "Invalid litteral in file, skiplines %d, line %d, row %d, block %d\n"%(self.info['hdr']['skiplines'],iline,i_raw,i_block)
print ">"+" ".join(ll)+"< out >",lla,'<'
self.M['flag_bad'][i_raw,i_block]=1
self.info['numbad']+=1
self.M['r1'][i_raw,i_block]=lla[0]*1
self.M['i1'][i_raw,i_block]=lla[1]*1
self.M['r2'][i_raw,i_block]=lla[2]*1
self.M['i2'][i_raw,i_block]=lla[3]*1
self.M['power']=self.M['r1']**2+self.M['i1']**2+self.M['r2']**2+self.M['i2']**2
# stores longitudes and latitudes
self.newmap('long')
self.newmap('colat')
self.M['long']=self.M['_row_values']*0
self.M['colat']=self.M['_row_values']*0
self.M['long'],self.M['colat']=phitheta2longcolat(self.M['phi'],self.M['theta'])
self._init_failed = False
def initFailed(self) :
return self._init_failed
def haveBadSamples(self) :
if self.initFailed(): return False
return self.info['numbad']>0
def formatGrasp(self) : return {'float':' %17.10e','int':' %4d'}
def recompose_header(self) :
import copy
hdr=[]
if self.info['hdr']['skiplines']>0 :
hdr=copy.deepcopy(self.info['hdr']['skipped'])
return hdr
def recompose_block_header(self,i_block) :
import numpy as np
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
ll=''
ll+=fmtF%self.info['thetai'][i_block]
ll+=fmtF%self.info['dtheta'][i_block]
ll+=fmtI%self.info['ntheta'][i_block]
ll+=fmtF%self.info['phi'][i_block]
ll+=fmtI%self.info['k1'][i_block]
ll+=fmtI%self.info['k2'][i_block]
ll+=fmtI%self.info['k3'][i_block]
return ['Planck,',ll.upper()]
def recompose_block_data(self,i_block,tab0=None) :
import numpy as np
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
tab=[]
if type(tab0)==type([]) : tab=copy.deepcopy(tab0)
if type(tab0)==type('') : tab=[tab0]
for i_raw in range(self.R['n']) :
ll=''
ll+=fmtF%self.M['r1'][i_raw,i_block]
ll+=fmtF%self.M['i1'][i_raw,i_block]
ll+=fmtF%self.M['r2'][i_raw,i_block]
ll+=fmtF%self.M['i2'][i_raw,i_block]
tab.append(ll.upper())
return tab
def recompose_block(self,i_block,tab0=None,fmt='%18.10e') :
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
tab=self.recompose_block_header(i_block)
return self.recompose_block_data(i_block,tab0=tab)
def recompose_map(self,tab0=None) :
tab=[]
for i_block in range(self.info['nblocks']) :
for l in self.recompose_block(i_block) :
tab.append(l)
return tab
def FourColumnsPower(self,power1Name='p1',power2Name='p2',powerName='power') :
"a FourColumns map has r1=sqrt(p1), i1=0, r2=sqrt(p2), i2=0"
new=self.copy()
new.info['ktype']=1
if self.M.has_key(power1Name) and self.M.has_key(power2Name) :
new.info['ncomp']=2
new.M['r1']=self[power1Name]**0.5
new.M['r2']=self[power2Name]**0.5
new.M['i1']=self[power1Name]*0
new.M['i2']=self[power1Name]*0
elif self.M.has_key(powerName) :
new.info['ncomp']=1
new.M['r1']=self[powerName]**0.5
new.M['r2']=self[powerName]*0
new.M['i1']=self[powerName]*0
new.M['i2']=self[powerName]*0
else :
print "the map shall contain ",power1Name,power2Name," or ",powerName
return
return new
def grasp2longcolat(self,phi_grasp,theta_grasp) :
""" converts GRASP (phi,theta) coordinates into standard (long,colat)
upon request returns a structure """
import numpy as np
_long = phi_grasp*1.
colat=theta_grasp*1.
idx = np.where(colat < 0)[0]
if len(idx) > 0 :
_long[idx]=_long[idx]+180.
colat[idx]=np.abs(colat[idx])
return _long,colat
def longcolat2grasp(self,_long,colat) :
"""converts ususal polar (long,colat) coordinates into GRASP (phi,theta) returns a structure"""
import numpy as np
phi=_long*1.
theta=colat*1.
idx = np.where(phi >= 180.)[0]
if len(idx) > 0 :
phi[idx]=phi[idx]-180.
theta[idx]=-theta[idx]
return phi,theta
def longcolat2rowcol(self,_long,colat) :
""" converts (long,colat) into index of phi and of theta in the matrix """
import numpy as np
phi,theta=self.longcolat2grasp(_long,colat)
return (theta-self.theta0)/self.dtheta,(phi-self.phi0)/self.dphi
def ipix2longcolat(self,nside,ipix,nest=False,deg=True) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return _long*180./np.pi,colat*180./np.pi
return _long,colat
def ipix2rowcol(self,nside,ipix,nest=False,deg=False) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return self.longcolat2rowcol(_long,colat)
return self.longcolat2rowcol(_long/np.pi*180.,colat/np.pi*180.)
def nside2ipix(self,nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
return nside2ipix(nside,Reversed=Reversed)
def parseColatRange(self,colatrange) :
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
return left,right
def healpix(self,nside,mapname='power',nest=False,Reversed=False,rot=[0.,0.],colatrange=None) :
"""converts to healpix or a stack of healpix maps of given nside
colatrange=None , takes all the map
colatrange=']a,b['
colatrange='[a,b['
colatrange=']a,b]'
for gridmap
"""
import numpy as np
import healpy as H
if colatrange==None :
ipix=self.nside2ipix(nside,Reversed=Reversed)
if rot[0]==0. and rot[1]==0 :
_long,colat = self.ipix2longcolat(nside,ipix)
phi,theta=self.longcolat2grasp(_long,colat)
r1=self.bilinearXY(mapname,phi,theta)
return r1
else :
fact=180./np.pi
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
NPEQ=12*nside/2
print left,right
ipixmin=H.ang2pix(nside,left[1]/fact,0)-NPEQ
ipixmax=H.ang2pix(nside,right[1]/fact,0)+NPEQ
if ipixmin < 0 : ipixmin=0
if ipixmax > 12*nside**2-1 : ipixmax=12*nside**2
ipix = np.arange(ipixmin,ipixmax)
colat,Long = H.pix2ang(nside,ipix)
fl=np.ones(len(colat))
if left[0] == ']' :
fl*=(left[1]/fact)<colat
else :
fl*=(left[1]/fact)<=colat
if right[0] == '[' :
fl*=colat<(right[1]/fact)
else :
fl*=colat<=(right[1]/fact)
idx=np.where(fl)[0]
ipix=ipix[idx]
colat=colat[idx]*fact
Long=Long[idx]*fact
fl=None
idx=None
phi,theta=self.longcolat2grasp(Long,colat)
r1=self.bilinearXY(mapname,phi,theta)
return r1,ipix
def polarplot(self,mapname,long_step=2,colat_step=10,log=None,cm=None,grayBad="#707070",adAxes=True,area=[12.,1.],cmap='hsv',vmin=-30,vmax=-0.1) :
import numpy as np
from matplotlib import colors as Colors
import pylab
from matplotlib import pyplot as plt
from matplotlib import cm
try :
_cm=cm.__dict__[cmap]
except :
print "required cmap ",cmap," not found, replaced with 'hsv'"
print "allowed values "
print cm.__dict__.keys()
_cm=cm.hsv
if adAxes : ax = plt.subplot(111, polar=True)
y = self.M[mapname]*1
if log == 'e' or log == 'ln' :
y=np.log(y)
elif log == '10' or log == 'log10' :
y=np.log(y)/np.log(10)
elif log == '2' :
y=np.log(y)/np.log(2)
else :
try :
b=np.float(log)
except :
b=None
if b!= None :
y=np.log(y)/np.log(b)
shape2=y.shape
shape1=shape2[0]*shape2[1]
idxLong=np.arange(0,shape2[1],colat_step)
idxColat=np.arange(0,shape2[0],long_step)
for i in idxColat :
tt=np.pi/180*self['long'][i][idxLong]
cc=self['colat'][i][idxLong]
aa=(area[0]-area[1])*(1-np.cos(np.pi/180.*cc))/2.+area[1]
print i,cc.min(),cc.max(),aa.min(),aa.max()
try :
c=plt.scatter(tt,cc, c=y[i][idxLong], s=aa, cmap=_cm,edgecolor='none',vmin=vmin,vmax=vmax)
except :
pass
plt.axis([0,360,0,180])
plt.title(self.mapname)
def mercatore(self,reversed=False) :
"""converts a CUT map to a MERCATOR MAP"""
import numpy as np
import copy
M=MercatoreMap()
shape=self.shape
halfrow=(self.R['n']-1)/2
newrow=(self.R['n']-1)/2+1
newcol=self.C['n']*2
for k in self.M.keys() :
M.M[k]=[]
if reversed :
for drow in np.arange(newrow-1,-1,-1) :
M.M[k].append(np.concatenate((self.M[k][halfrow+drow],self.M[k][halfrow-drow])))
else :
for drow in np.arange(0,newrow) :
M.M[k].append(np.concatenate((self.M[k][halfrow+drow],self.M[k][halfrow-drow])))
M.M[k]=np.array(M.M[k])
M.shape=[M.M['phi'].shape[0],M.M['phi'].shape[1]]
M.M['long']=copy.deepcopy(M.M['phi'])
M.M['colat']=copy.deepcopy(M.M['theta'])
# long is phi where theta > 0 and phi+180 where theta < 0
M.M['long']+=180*(M.M['colat']<0)
# but for theta=0 the algorithm fails, so this is a patch
idx2=np.where(np.sign(M.M['theta']).ptp(axis=1)==2)[0][0]
for k in np.where(np.sign(M.M['theta']).ptp(axis=1)==0)[0] :
M.M['long'][k]=M.M['long'][idx2]
# colat is abs(theta)
M.M['colat']=abs(M.M['colat'])
M.C=copy.deepcopy(self.C)
M.C['name']='long'
M.C['v']=M.M['long'][0]*1
M.C['n']=len(M.C['v'])
M.C['min']=M.C['v'].min()
M.C['max']=M.C['v'].max()
M.R=copy.deepcopy(self.R)
M.R['name']='colat'
M.R['v']=M.M['colat'][:,0]*1
M.R['n']=len(M.R['v'])
M.R['min']=M.R['v'].min()
M.R['max']=M.R['v'].max()
M.info['projection']='GRASP-CUT,Mercatore'
return M
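# Illustrative sketch of the CUT block layout parsed by GraspMap.get_cuts and
# written back by recompose_block: each block is one free text line, one
# seven-field header (thetai, dtheta, ntheta, phi, k1, k2, k3) and ntheta rows
# of four field components. The numbers below are arbitrary placeholders.
def _cut_block_layout_demo():
    import numpy as np
    fmtF, fmtI = ' %17.10e', ' %4d'
    thetai, dtheta, ntheta, phi, k1, k2, k3 = -1.0, 1.0, 3, 0.0, 1, 1, 2
    lines = ['Field data in cuts']
    lines.append((fmtF % thetai) + (fmtF % dtheta) + (fmtI % ntheta) +
                 (fmtF % phi) + (fmtI % k1) + (fmtI % k2) + (fmtI % k3))
    for r1, i1, r2, i2 in np.zeros((ntheta, 4)):
        lines.append((fmtF % r1) + (fmtF % i1) + (fmtF % r2) + (fmtF % i2))
    return lines   # one synthetic block, ntheta+2 lines long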
class MercatoreMap(MapGrid) :
"""A Mercator map: a map with x=column=longitude, y=row=colatitude"""
def __init__(self,*args) :
MapGrid.__init__(self)
self.info['projection']='GRASP-Mercatore'
def right_close_col(self,period=False,right_col_value=None) :
MapGrid.right_close_col(self,period=period,right_col_value=None)
if right_col_value != None :
try :
self.C['v'][-1]=float(right_col_value)
except :
pass
self.C['max'] = self.C['v'].max()
try :
self.M['long'][:,-1]=float(right_col_value)
except :
pass
#def resample(self,Long,Colat) :
#new=self.copy()
def unitPixelArea(self) :
import numpy as np
return np.deg2rad(self.R['delta'])*np.deg2rad(self.C['delta'])
def radialIntegral(self,arg,returnJustIntegral=False,thetaRange=None,asStruct=False) :
import numpy as np
sinColat=np.sin(np.deg2rad(self.M['colat']))
if type(arg) == type('') :
try :
field=self[arg]*sinColat*self.unitPixelArea()
except :
return None
else :
try :
field=arg*sinColat*self.unitPixelArea()
except :
return None
dIdtheta=np.zeros(self.R['n'])
for itheta in range(len(dIdtheta)) : dIdtheta[itheta]=0.5*(field[itheta][1:]+field[itheta][0:-1]).sum()
midH=0.5*(dIdtheta[1:]+dIdtheta[0:-1])
if returnJustIntegral and thetaRange==None: return np.sort(midH).sum()
Itheta=np.zeros(self.R['n'])
Itheta[0]=dIdtheta[0]*1
for itheta in range(1,len(Itheta)) : Itheta[itheta]=np.sort(midH[0:itheta+1]).sum()
if asStruct : return {'colat':self.M['colat'].mean(axis=1),'dIdcolat':dIdtheta,'Icolat':Itheta,'method':'spherical,trapezoidal'}
return self.M['colat'].mean(axis=1),dIdtheta,Itheta,'spherical,trapezoidal'
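# Illustrative check of the spherical integration scheme used in
# MercatoreMap.radialIntegral above: on a regular (colat, long) grid the
# integral of f*sin(colat)*dcolat*dlong over the whole sphere approaches 4*pi
# for f = 1. Standalone numpy sketch, independent of the MapGrid machinery.
def _spherical_trapezoid_demo():
    import numpy as np
    colat = np.linspace(0., np.pi, 181)                    # row (colatitude) axis
    dcolat = colat[1] - colat[0]
    n_long, dlong = 360, np.deg2rad(1.)                    # column (longitude) axis
    dIdcolat = np.sin(colat) * n_long * dlong * dcolat     # one row integral per colatitude
    total = 0.5 * (dIdcolat[1:] + dIdcolat[:-1]).sum()     # trapezoid over colatitude
    assert abs(total - 4. * np.pi) < 1e-3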
class DirectionalMapMoments_Base_Old:
def __init__(self,method,maxorder,Map=None,Bounds=None) :
"""computes the directional moments on a map; the moment sums are filled in by the subclasses"""
import numpy as np
self.method=method
self.TreatNaNasZero=True
self.TreatInfAsZero=True
self.TreatOutBoundsAsZero=True
if Bounds == None :
self.Bounds =np.array([-np.inf,np.inf])
else :
self.Bounds =Bounds
if type(Map) == type('') :
self.load(Map)
return
self.exclusionRadius = 180.
self.mu=None
self.nside=None
self.npix=None
self.pixelArea = None
self.n=None
self.maxorder=maxorder
# Sum[xp,yp,zp] = sum(m * x**xp * y**yp * z**zp); the sums are computed by the subclasses from the map samples
self.Sum=np.zeros([self.maxorder,self.maxorder,self.maxorder])
def __getitem__(self,key) :
xp,yp,zp=key
return self.Sum[xp,yp,zp]
def calc_integral(self) :
import numpy as np
Pi4=4*np.pi
Integral = self.Sum*self.pixelArea
norm=Integral[0,0,0]
Sx=Integral[1,0,0]/norm
Sy=Integral[0,1,0]/norm
Sz=Integral[0,0,1]/norm
line=',%e,%e,%e,%e'%(Sx,Sy,Sz,norm)
return Sx,Sy,Sz,norm,line
def save(self,pickle_file) :
import pickle
if type(pickle_file)==type('') :
self.filename=pickle_file
try :
pickle.dump(self.__dict__,open(pickle_file,'w'))
except :
return False
else :
try :
pickle.dump(self.__dict__,pickle_file)
except :
return False
return True
def load(self,pickle_file) :
import pickle
if type(pickle_file)==type('') :
self.filename=pickle_file
try :
self.__dict__=pickle.load(open(pickle_file,'r'))
except :
return False
else :
try :
self.__dict__=pickle.load(pickle_file)
except :
return False
return True
class DirectionalMapMoments_GRD(DirectionalMapMoments_Base_Old) :
def __init__(self,GrdMap,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a GRD map """
import numpy as np
DirectionalMapMoments_Base_Old.__init__(self,'grd',maxorder)
class DirectionalMapMoments_CUT_Mercatore(DirectionalMapMoments_Base_Old) :
def __init__(self,GrdMap,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a CUT map managed as a Mercatore map"""
import numpy as np
DirectionalMapMoments_Base_Old.__init__(self,'cut-mercatore',maxorder)
class DirectionalMapMoments_Healpix(DirectionalMapMoments_Base_Old) :
def __init__(self,Map,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a map using healpix integration"""
import numpy as np
import healpy as H
DirectionalMapMoments_Base_Old.__init__(self,'healpix',maxorder)
self.TreatNaNasZero=TreatNaNasZero
self.TreatInfAsZero=TreatInfAsZero
self.TreatOutBoundsAsZero=TreatOutBoundsAsZero
if Bounds == None :
self.Bounds =np.array([-np.inf,np.inf])
else :
self.Bounds =Bounds
if type(Map) == type('') :
self.load(Map)
return
if exclusionRadius==None :
self.exclusionRadius = 180. if excludeOut else 0.
else :
self.exclusionRadius = exclusionRadius
self.mu=np.cos(self.exclusionRadius/180.*np.pi)
self.nside=int(np.sqrt(len(Map)/12.)) if Nside == None else int(Nside)
self.npix=int(12.*self.nside**2)
self.pixelArea = 4.*np.pi/float(12.*self.nside**2)
if ipixVec == None :
v=np.array(H.pix2vec(self.nside,nside2ipix(self.nside)))
idxAllowed=np.where(v[2]<=self.mu)
else :
v=np.array(H.pix2vec(self.nside,ipixVec))
idxAllowed=ipixVec
m=Map[idxAllowed]
x=v[0][idxAllowed]
y=v[1][idxAllowed]
z=v[2][idxAllowed]
if self.TreatOutBoundsAsZero :
idx = np.where((m<self.Bounds[0])+(self.Bounds[1]<m))[0]
if len(idx) > 0 : m[idx]=0.
if self.TreatNaNasZero :
idx = np.where(np.isnan(m))[0]
if len(idx) > 0 : m[idx]=0.
if self.TreatInfAsZero :
idx = np.where(1-np.isfinite(m))[0]
if len(idx) > 0 : m[idx]=0.
self.n=len(m)
self.maxorder=maxorder
self.Sum=np.zeros([self.maxorder,self.maxorder,self.maxorder])
for xp in range(maxorder) :
for yp in range(maxorder) :
for zp in range(maxorder) :
self.Sum[xp,yp,zp]=(m*x**xp*y**yp*z**zp).sum()
def calc_integral(self) :
import numpy as np
Pi4=4*np.pi
Integral = self.Sum*self.pixelArea
norm=Integral[0,0,0]
Sx=Integral[1,0,0]/norm
Sy=Integral[0,1,0]/norm
Sz=Integral[0,0,1]/norm
line=',%e,%e,%e,%e'%(Sx,Sy,Sz,norm)
return Sx,Sy,Sz,norm,line
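# Illustrative sketch of the directional sums accumulated by the classes above
# (requires healpy): for a constant map the first-order moments vanish while
# the zeroth-order integral is the full solid angle 4*pi.
def _directional_moments_demo(nside=8):
    import numpy as np
    import healpy as H
    npix = 12 * nside ** 2
    m = np.ones(npix)                           # constant test map
    x, y, z = H.pix2vec(nside, np.arange(npix))
    pixelArea = 4. * np.pi / npix
    norm = m.sum() * pixelArea                  # ~ 4*pi
    Sx = (m * x).sum() * pixelArea / norm       # ~ 0
    Sy = (m * y).sum() * pixelArea / norm       # ~ 0
    Sz = (m * z).sum() * pixelArea / norm       # ~ 0
    return Sx, Sy, Sz, norm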
#from numpy import nan
#def cuts2matrix(self,No180Pad=No180Pad)
#;
#; converts a structure from get_cuts in a set of matrices, the output is a structure
#; theta = theta values converted from GRASP convention to usual polar convention
#; phi = phi values from GRASP convention to usual polar convention
#; c1r, c1i, c2r, c2i = components 1 and 2 real (r) and imaginary (i) parts
#; GRASP = a structure containing the original grasp theta and phi
#;
#; NOTE = usually GRASP does not generates cuts for PHI=180deg since it is
#; nothing else than PHI=0deg, by default CUTS2MATRIX add a PHI=180deg
#; cut. To exclude this set the /No180Pad keyword
#;
#;
#if not keyword_set(No180Pad) then Pad180 = 1 else Pad180 = 0
#nphi=n_tags(cuts)
#m1=dblarr(nphi+Pad180,cuts.(0).ntheta)
#theta=m1
#phi=m1
#long=m1
#colat=m1
#c1r=m1
#c1i=m1
#c2r=m1
#c2i=m1
#vtheta=cuts.(0).theta
#vphi=dblarr(nphi+Pad180)
#vlong=dblarr(nphi+Pad180)
#for iphi=0,nphi-1 do begin
#theta[iphi,*]=cuts.(iphi).theta
#phi[iphi,*]=cuts.(iphi).phi
#c1r[iphi,*]=cuts.(iphi).c1r
#c1i[iphi,*]=cuts.(iphi).c1i
#c2r[iphi,*]=cuts.(iphi).c2r
#c2i[iphi,*]=cuts.(iphi).c2i
#vphi[iphi]=cuts.(iphi).phi
#endfor
#if Pad180 ne 0 then begin
#; performs padding of 180 deg
#theta[nphi,*]=-reverse(cuts.(0).theta)
#phi[nphi,*]=cuts.(0).phi+180.
#c1r[nphi,*]=reverse(cuts.(0).c1r)
#c1i[nphi,*]=reverse(cuts.(0).c1i)
#c2r[nphi,*]=reverse(cuts.(0).c2r)
#c2i[nphi,*]=reverse(cuts.(0).c2i)
#vphi[nphi]=180.
#endif
#return,create_struct( $
#'theta',theta $
#,'phi',phi $
#,'c1r',c1r $
#,'c1i',c1i $
#,'c2r',c2r $
#,'c2i',c2i $
#,'GRASP',create_struct('theta',vtheta,'phi',vphi) $
#,'nphi',nphi $
#,'Pad180',Pad180 $
#,'phi0',vphi[0] $
#,'Delta_phi',vphi[1]-vphi[0] $
#,'theta0',vtheta[0] $
#,'Delta_theta',vtheta[1]-vtheta[0] $
#)
#end
#function cuts_grid,cuts
#; derives gridding parameters for a structure produced by get_cuts
#phi0=cuts.(0).phi
#theta0=cuts.(0).theta[0]
#dphi = cuts.(1).phi-cuts.(0).phi
#dtheta = cuts.(0).theta[1]-cuts.(0).theta[0]
#xmax = -1e6
#for kk = 0,n_tags(cuts)-1 do begin
#_xmax=max(abs(cuts.(0).theta))
#if _xmax gt xmax then xmax = _xmax
#endfor
#return,create_struct('phi0',phi0,'theta0',theta0,'dphi',dphi,'dtheta',dtheta,'thetamax',xmax)
#end
#function componentsTOstoke,component1_real,component1_imaginary,component2_real,component2_imaginary,direct=direct
#;
#; converts component maps to stokes
#;
#E1 = component1_real^2+component1_imaginary^2
#E2 = component2_real^2+component2_imaginary^2
#SI = E1+E2
#SQ = E1-E2
#E1 = sqrt(E1)
#E2 = sqrt(E2)
#F1 = ATAN(component1_imaginary,component1_real)
#F2 = ATAN(component2_imaginary,component2_real)
#SU = 2*E1*E2*COS(F2 - F1)
#SV = 2*E1*E2*SIN(F2 - F1)
#return,create_struct('I',SI,'Q',SQ,'U',SU,'V',SV,'F1',F1,'F2',F2)
#end
#function cuts2cartesian,cuts,side=side,stokes=stokes
#; converts a cut in a matrix using cartesian polar coordinates
#if not keyword_set(side) then side = 600
#phi0=cuts.(0).phi
#theta0=cuts.(0).theta[0]
#dphi = cuts.(1).phi-cuts.(0).phi
#dtheta = cuts.(0).theta[1]-cuts.(0).theta[0]
#npix=side+1
#xmin=-max(abs(cuts.(0).theta))
#xmax = -1e6
#for kk = 0,n_tags(cuts)-1 do begin
#_xmax=max(abs(cuts.(0).theta))
#if _xmax gt xmax then xmax = _xmax
#endfor
#ix0=(npix-1)/2
#iy0=(npix-1)/2
#xmap=dblarr(npix,npix)
#ymap=dblarr(npix,npix)
#for r = 0,npix-1 do xmap[r,*]=(double(indgen(npix))/double(npix-1)-0.5)*xmax*2
#for c = 0,npix-1 do ymap[*,c]=(double(indgen(npix))/double(npix-1)-0.5)*xmax*2
#colatmap=sqrt(xmap^2+ymap^2)
#longmap=atan(ymap,xmap)/!dpi*180.
#idx = where(longmap lt 0,count)
#if count gt 0 then longmap[idx]=360+longmap[idx]
#pt=longcolat2phitheta(longmap,colatmap)
#rc=longcolat2rowcol(longmap,colatmap,dphi=dphi,dtheta=dtheta,phi0=phi0,theta0=theta0)
#slm=cuts2matrix(cuts)
#c1r=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#c1i=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#c2r=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#c2i=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#out=create_struct('x',xmap,'y',ymap,'ix0',ix0,'iy0',iy0,'colat',colatmap,'long',longmap,'phi',pt.phi,'theta',pt.theta,'iphi',rc.iphi,'itheta',rc.itheta)
#if keyword_set(stokes) then begin
#return,create_struct(out,'stokes',componentsTOstoke(c1r,c1i,c2r,c2i))
#endif
#return,create_struct(out,'power',c1r^2+c1i^2+c2r^2+c2i^2)
#end
#function cuts2healpix,nside,cuts,reversed=reversed,ipix=ipix,onlyPower=onlyPower,dbi=dbi,stokes=stokes
#; convert a cuts into an healpix max by bilinear interpolation
#; if /onlyPower returns just the power otherwise returns
#; c1r = component 1 real part
#; c1i = component 1 imaginary part
#; c2r = component 2 real part
#; c2i = component 2 imaginary part
#; power
#; if /dbi power is converted in 10*alog10(power) (if /stokes this is not done)
#; if /stokes return stokes parameters and F1 and F2 instead of components c1, c2
#;
#if not keyword_set(reversed) then ipix=nside2ipix(nside) else ipix=nside2ipix(nside,/reversed)
#slm = cuts2matrix(cuts)
#rc=ipix2rowcol(nside,ipix,slm.phi0,slm.delta_phi,slm.theta0,slm.delta_theta)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#if keyword_set(stokes) then return,componentsTOstoke(r1,i1,r2,i2)
#power = r1^2+i1^2+r2^2+i2^2
#if keyword_set(dbi) then power=10d0*alog10(power)
#if keyword_set(onlyPower) then return,power
#return,create_struct('c1r',r1,'c2r',r2,'c1i',i1,'c2i',i2,'power',power)
#end
#function beamSumMap,map,exclusionRadius=exclusionRadius,reverse=reverse,v=v,excludeOut=excludeOut,notNormalizedByBeam=notNormalizedByBeam,asStruct=asStruct
#; computs the directional moments on a map
#if not keyword_set(excludeOut) then excludeOut = 0
#if not keyword_set(exclusionRadius) then $
#if excludeOut then exclusionRadius=[180d0] else exclusionRadius=[0d0]
#npix=n_elements(map)
#nside=long(sqrt(npix/12.))
#pix2vec_ring,nside,indgen(npix,/long),v
#z = v[*,2]
#for i = 0,2 do v[*,i]=v[*,i]*map
#sss = dblarr(10,n_elements(exclusionRadius))
#sss[9,*]=4d0*!dpi/double(npix)
#sss[5,*]=npix
#sss[6,*]=nside
#for ir=0,n_elements(exclusionRadius)-1 do begin
#xr=exclusionRadius[ir]
#sss[7,ir]=xr
#mu=cos(xr/180d0*!dpi)
#imin = 0
#imax = npix-1l
#count=-1
#if excludeOut then $
#if xr eq 180. then imax=npix-1 else imax = min(where(mu ge z,count)) $
#else $
#if xr eq 0. then imin=0 else imin = min(where(mu ge z,count))
#print,ir,xr,excludeOut,ir,count,imin,imax
#sss[8,ir]=count
#sss[4,ir]=total(map[imin:imax])
#for ic = 0,2 do sss[ic,ir] = total(v[imin:imax,ic])
#if not keyword_set(notNormalizedByBeam) then for ic = 0,2 do sss[ic,ir] = sss[ic,ir]/sss[4,ir]
#sss[3,ir]=sqrt(sss[0,ir]^2+sss[1,ir]^2+sss[2,ir]^2)
#for ic = 0,2 do sss[ic,ir]=sss[ic,ir]/sss[3,ir]
#endfor
#if not keyword_set(asStruct) then return,sss
#; computes the polar deflection
#polar_deflection=dblarr(n_elements(exclusionRadius))
#longitude_deflection=dblarr(n_elements(exclusionRadius))
#for ir=0,n_elements(exclusionRadius)-1 do begin
#polar_deflection=acos(sss[2,ir])*180d0/!dpi
#normxy = sqrt(total(sss[0:1,ir]^2))
#longitude_deflection=atan(sss[1,ir]/normxy,sss[0,ir]/normxy)*180d0/!dpi
#endfor
#return,create_struct($
#'vSx',sss[0,*] $
#,'vSy',sss[1,*] $
#,'vSz',sss[2,*] $
#,'S',sss[3,*] $
#,'beam_sum',sss[4,*] $
#,'npix',long(sss[5,*]) $
#,'nside',long(sss[6,*]) $
#,'exclusionRadius',sss[7,*] $
#,'excludedPixels',long(sss[8,*]) $
#,'pixelArea',sss[9,*] $
#,'deflection_polar_deg',polar_deflection $
#,'deflection_longitude_deg',longitude_deflection $
#)
#end
#function beamSums,nside,cuts,exclude_angle=exclude_angle,map=map,asStruct=asStruct
#; computes Sx, Sy. Sz (directional integrals) for a beam map
#map=cuts2healpix(nside,cuts,ipix=ipix,/onlyPower)
#pix2vec_ring,nside,ipix,v
#if keyword_set(exclude_angle) then begin
#print,exclude_angle
#ang = 180./dpi*acos(v[*,2])
#idx = where(ang > exclude_angle,count)
#if count gt 0 then begin
#v1=dblarr(n_elements(idx),3)
#for i=0,2 do v1[*,i]=v[idx,i]
#v=v1
#endif else begin
#print,"Error all pixels removed"
#return,0
#endelse
#endif
#sss = dblarr(7)
#sss[6]=nside
#sss[5]=12d0*double(nside)^2
#sss[4]=total(map)
#; returns the versors
#for i=0,2 do sss[i] = (total(v[*,i]*map))/sss[4]
#; normalize
#sss[3]=sqrt(total(sss[0:2]^2))
#for i=0,2 do sss[i] = sss[i]/sss[3]
#if not keyword_set(asStruct) then return,sss
#; computes the polar deflection
#polar_deflection_deg=acos(sss[2]/sss[3])*180d0/!dpi
#normxy = sqrt(total(sss[0:1]^2))
#longitude_deflection_deg=atan(sss[1]/normxy,sss[0]/normxy)*180d0/!dpi
#return,create_struct( $
#'vSx',sss[0] $
#,'vSy',sss[1] $
#,'vSz',sss[2] $
#,'S',sss[3] $
#,'beam_sum',sss[4] $
#,'npix',long(sss[5]) $
#,'nside',long(sss[6]) $
#,'deflection_polar_deg',polar_deflection_deg $
#,'deflection_longitude_deg',longitude_deflection_deg $
#)
#end
#function beamSumS2,lcuts,hcuts,nside=nside,map=map,returnFirst=returnFirst,returnSecond=returnSecond
#; computes Sx, Sy. Sz for a beam using two maps,
#; lcuts = a lowress map of cuts
#; hcuts = an highres map of cuts
#; /returnFirst returns just the high resolution map (no summation)
#; /returnSecond returns just the second map (no summation)
#;
#; high resolution integral
#hpa=cuts_grid(hcuts)
#if not keyword_set(nside) then nside = 1024l
#map=dblarr(12l*nside*nside)
#radius = -1e6
#for kk = 0,n_tags(hcuts)-1 do begin
#_xmax=max(abs(hcuts.(kk).theta))
#if _xmax gt radius then radius = _xmax
#endfor
#query_disc,nside,[0.,0.,1.],radius,ipix,/deg,/inclusive
#slm = cuts2matrix(hcuts)
#rc=ipix2rowcol(nside,ipix,hpa.phi0,hpa.dphi,hpa.theta0,hpa.dtheta)
#;dphi=hpa.dphi,dtheta=hpa.dtheta,phi0=hpa.phi0,theta0=hpa.theta0)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#map[ipix] = r1^2+r2^2+i1^2+i2^2
#if keyword_set(returnFirst) then return,map
#query_disc,nside,[0.,0.,-1.],180.-radius,ipix,/deg
#;ipix=nside2ipix(nside)
#slm = cuts2matrix(lcuts)
#lpa=cuts_grid(lcuts)
#rc=ipix2rowcol(nside,ipix,lpa.phi0,lpa.dphi,lpa.theta0,lpa.dtheta)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#map[ipix] = r1^2+r2^2+i1^2+i2^2
#if keyword_set(returnSecond) then return,map
#return,beamSumMap(map,/reverse,/asStruct)
#end
#function radialDependence,mapname,listRadiiDeg=listRadiiDeg
#if not keyword_set(listRadiiDeg) then listRadiiDeg=[0.1,0.5,1.,1.5,2.,2.5,5.,7.5,10.,20.,30.,40.,50,60,70,80.,85.,90,100,110,120,130,140,150,160,170,180]
#read_fits_map,mapname,mapX
#xxx = beamSumMap(mapX,exclusionRadius=listRadiiDeg,/excludeOut)
#sss=xxx
#for ic=0,2 do sss[ic,*]=sss[ic,*]/sss[4,*]
#sss[3,*]=sqrt(sss[0,*]^2+sss[1,*]^2+sss[2,*]^2)
#for ic=0,2 do sss[ic,*]=sss[ic,*]/sss[3,*]
#radius=sss[7,*]
#dump=sss[3,*]
#polar_deflection=acos(sss[2,*])/!dpi*180.*60.
#longitudinal_deflection=atan(sss[1,*],sss[0,*])/!dpi*180.
#return,create_struct('name',mapname,'long_def',longitudinal_deflection,'pol_def',polar_deflection,'dump',dump,'radius',radius)
#end
#function readgrd, fileinput
#; reads a grd file
#; (deprecated)
#xs = 0.d
#xs = 0.d
#ye = 0.d
#ye = 0.d
#str='!5 '
#ktype = 1l ; --> data type format
#nset = 1l ; --> number of beams in the file
#icomp = 1l ; --> field component
#ncomp = 1l ; --> number of components
#igrid = 1l ; --> type of field grid
#ix = 1l ; --> center of the beam
#iy = 1l ; (ix,iy)
#c1 = 0.d
#c2 = 0.d
#c3 = 0.d
#c4 = 0.d
#nx = 1l
#ny = 1l
#klimit = 1l
#openr,1,fileinput
#for line=0,100 do begin
#if (strtrim(str,2) ne '++++') then begin
#readf,1,str
#print,str
#endif else begin
#goto, jump1
#endelse
#endfor
#jump1: readf,1,ktype
#readf,1,nset,icomp,ncomp,igrid
#readf,1,ix,iy
#readf,1,xs,ys,xe,ye
#readf,1,nx,ny,klimit
#dx = (xe - xs)/(nx-1)
#x = findgen(nx)*dx + xs
#dy = (ye - ys)/(ny-1)
#y = findgen(ny)*dy + ys
#print,'Reading ', fileinput
#print,'grid of ', nx,' x ', ny,' points'
#c1r = dblarr(nx,ny)
#c1i = dblarr(nx,ny)
#c2r = dblarr(nx,ny)
#c2i = dblarr(nx,ny)
#for i=0,nx-1 do begin
#for j=0,ny-1 do begin
#readf,1,c1,c2,c3,c4
#c1r(j,i) = c1
#c1i(j,i) = c2
#c2r(j,i) = c3
#c2i(j,i) = c4
#endfor
#endfor
#close,1
#power = c1r^2 + c1i^2 + c2r^2 + c2i^2
#res = { x : x , $
#y : y , $
#power : power $
#}
#return, res
#end
def readgrd(inputfile) :
"""
; reads a grd file
; (deprecated)
Reference document:
LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS
M. Sandri
PL-LFI-PST-TN-044, 1.0,July 2003
"""
#xs = 0.d
#xs = 0.d
#ye = 0.d
#ye = 0.d
#str='!5 '
#ktype = 1l ; --> data type format
#nset = 1l ; --> number of beams in the file
#icomp = 1l ; --> field component
#ncomp = 1l ; --> number of components
#igrid = 1l ; --> type of field grid
#ix = 1l ; --> center of the beam
#iy = 1l ; (ix,iy)
#c1 = 0.d
#c2 = 0.d
#c3 = 0.d
#c4 = 0.d
#nx = 1l
#ny = 1l
#klimit = 1l
import numpy as np
try :
h=open(inputfile,'r').readlines()
except :
print "File %s not found"%inputfile
return
# removes the new line
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0]
currentline=0
while(h[currentline] != '++++') :
currentline+=1
if currentline == len(h):
print "Error marker ++++ not found"
return h
infos=h[0:currentline]
currentline +=1
print h[currentline]
ktype = int(h[currentline])
currentline +=1
print h[currentline]
ll = h[currentline].split()
nset = int(ll[0])
icomp = int(ll[1])
ncomp = int(ll[2])
igrid = int(ll[3])
currentline +=1
print h[currentline]
ll = h[currentline].split()
ix = int(ll[0])
iy = int(ll[1])
currentline +=1
print h[currentline]
ll = h[currentline].split()
xs = float(ll[0])
ys = float(ll[1])
xe = float(ll[2])
ye = float(ll[3])
currentline +=1
print h[currentline]
ll = h[currentline].split()
nx = int(ll[0])
ny = int(ll[1])
klimit = int(ll[2])
dx = (xe - xs)/float(nx-1)
xcen=ix*dx
x = np.arange(nx)*dx + xs+xcen
dy = (ye - ys)/float(ny-1)
ycen=iy*dy
y = np.arange(ny)*dy + ys+ycen
print 'Reading ', inputfile
print 'grid of ', nx,' x ', ny,' points'
print 'ix ', ix,' iy ', iy
c1r = np.zeros([ny,nx])
c1i = np.zeros([ny,nx])
c2r = np.zeros([ny,nx])
c2i = np.zeros([ny,nx])
for j in range(ny) :
for i in range(nx) :
currentline +=1
ll = h[currentline].split()
c1r[j,i] = float(ll[0])
c1i[j,i] = float(ll[1])
c2r[j,i] = float(ll[2])
c2i[j,i] = float(ll[3])
return {'x':x,'y':y,'r1':c1r,'r2':c2r,'i1':c1i,'i2':c2i,'power':c1r**2 + c1i**2 + c2r**2 + c2i**2,'infos':infos}
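# Illustrative usage of readgrd (deprecated but kept for reference): a minimal
# synthetic 2x2 GRD file is written to a temporary location and read back.
# The header values are arbitrary placeholders following the layout parsed
# above ('++++' marker, then ktype, "nset icomp ncomp igrid", "ix iy",
# "xs ys xe ye", "nx ny klimit", followed by nx*ny rows of four components).
def _readgrd_demo():
    import os
    import tempfile
    lines = ['Synthetic GRD header', '++++', ' 1',
             ' 1 3 2 1', ' 0 0',
             ' -0.5 -0.5 0.5 0.5', ' 2 2 0',
             ' 1.0 0.0 0.0 0.0', ' 0.5 0.0 0.0 0.0',
             ' 0.5 0.0 0.0 0.0', ' 0.25 0.0 0.0 0.0']
    fd, path = tempfile.mkstemp(suffix='.grd')
    os.write(fd, '\n'.join(lines) + '\n')
    os.close(fd)
    grd = readgrd(path)
    os.remove(path)
    return grd['power']   # 2x2 array of c1r**2 + c1i**2 + c2r**2 + c2i**2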
class GridMap(MapGrid) :
def __init__(self,inputfile,skiplines=0,silent=False,closeColumn=False,Pickle=False,nodata=False,addPolar=True,addUV=True,addP1P2=True) :
import numpy as np
MapGrid.__init__(self)
if Pickle :
self.load(inputfile)
return
self.info['grd_file']=inputfile.strip()
self.info['projection']='GRASP-GRD'
self.info['ReferenceDocument']="LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS\nM. Sandri\nPL-LFI-PST-TN-044, 1.0,July 2003"
if inputfile.strip() == '' : return
self.get_grd(inputfile,nodata=nodata)#,CounterPhi=CounterPhi,silent=silent,useCounterPhi=useCounterPhi)
if closeColumn :
self.right_close_col()
for k in self.M.keys() :
if k!='_row_values' and k!='_col_values' and k!='_row_index' and k!='_col_index' :
for r in range(self.R['n']) :
self.M[k][r][-1]=self.M[k][self.R['n']-1-r][0]
if addPolar :
self.M['colat']=np.rad2deg(np.arcsin((self.M['_col_values']**2+self.M['_row_values']**2)**0.5))
self.M['long']=np.mod(np.rad2deg(np.arctan2(self.M['_row_values'],self.M['_col_values'])),360.)
if addUV :
self.newmap(self.R['name'],unit='',value=self['_row_values'])
self.newmap(self.C['name'],unit='',value=self['_col_values'])
if addP1P2 :
self.newmap('p1',unit='',value=self['r1']**2+self['i1']**2)
self.newmap('p2',unit='',value=self['r2']**2+self['i2']**2)
def end_of_header_marker(self) :
"""returns the string used as a marker of end of header"""
return '++++'
def formatGrasp(self) : return {'float':' %17.10e','int':' %11d'}
def get_grd(self,inputfile,nodata=False) :
"""
reads a grd file
Reference document:
LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS
M. Sandri
PL-LFI-PST-TN-044, 1.0,July 2003
Beware: the fastest moving index is the column;
the IDL readgrd instead swaps rows with columns,
reading the columns as if they were the slowest index.
"""
import sys
import numpy as np
import copy
try :
h=open(inputfile,'r').readlines()
self.mapname=inputfile
self.info['inputfile']=inputfile
except :
print "File %s not found"%inputfile
return
# removes the new line and other special characters
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0].strip()
currentline=0
while(h[currentline].strip() != self.end_of_header_marker()) :
currentline+=1
if currentline == len(h):
print "Error marker %s not found" % self.end_of_header_marker()
return h
self.info['header']=copy.deepcopy(h[0:currentline])
currentline +=1
self.info['ktype']=int(h[currentline])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['nset']=int(ll[0])
self.info['icomp']= int(ll[1])
self.info['ncomp']= int(ll[2])
self.info['igrid'] = int(ll[3])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['ix'] = int(ll[0])
self.info['iy'] = int(ll[1])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['xs'] = float(ll[0])
self.info['ys'] = float(ll[1])
self.info['xe'] = float(ll[2])
self.info['ye'] = float(ll[3])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['nx'] = int(ll[0])
self.info['ny'] = int(ll[1])
self.info['klimit'] = int(ll[2])
#computed parameters
# X are columns
self.info['dx'] = (self.info['xe']-self.info['xs'])/float(self.info['nx']-1)
self.info['xcen'] = self.info['dx']*self.info['ix']
self.set_col_scale('U','uv',np.arange(self.info['nx'])*self.info['dx']+ self.info['xs']+self.info['xcen'])
# Y are rows
self.info['dy'] = (self.info['ye']-self.info['ys'])/float(self.info['ny']-1)
self.info['ycen'] = self.info['dy']*self.info['iy']
self.info['grd_file']=inputfile.strip()
self.set_row_scale('V','uv',np.arange(self.info['ny'])*self.info['dy']+ self.info['ys']+self.info['ycen'])
print 'Reading ', inputfile
print 'grid of ', self.info['nx'],' x ', self.info['ny'],' points'
if nodata :
return
# maps used for debug
self.newmap('_line_index','')
# component maps
self.newmap('r1','')
self.newmap('i1','')
self.newmap('r2','')
self.newmap('i2','')
self.newmap('power','')
#self.newmap('rho1','')
#self.newmap('rho2','')
#self.newmap('phi1','')
#self.newmap('phi2','')
for r in range(self.R['n']) :
for c in range(self.C['n']) :
currentline +=1
self.M['_row_values'][r,c]=self.R['v'][r]*1.
self.M['_row_index'][r,c]=r*1.
self.M['_col_values'][r,c]=self.C['v'][c]*1.
self.M['_col_index'][r,c]=c*1.
self.M['_line_index'][r,c]=currentline*1.
ll = h[currentline].split()
self.M['r1'][r,c]=float(ll[0])
self.M['i1'][r,c]=float(ll[1])
self.M['r2'][r,c]=float(ll[2])
self.M['i2'][r,c]=float(ll[3])
self.M['power'][r,c]=float(ll[0])**2+float(ll[1])**2+float(ll[2])**2+float(ll[3])**2
#self.M['rho1'][r,c]=(float(ll[0])**2+float(ll[1])**2)**0.5
#self.M['rho2'][r,c]=(float(ll[2])**2+float(ll[3])**2)**0.5
#self.M['phi1'][r,c]=np.arctan2(float(ll[1]),float(ll[0]))
#self.M['phi2'][r,c]=np.arctan2(float(ll[3]),float(ll[2]))
def UV(self,Vectors=False) :
"""returns the U, V matrix
if Vectors=True the values of R and C are returned
"""
if Vectors :
return self.C['v'],self.R['v']
return self.M['_col_values'],self.M['_row_values']
def thetaUVphiUV(self,deg=True) :
"""returns the thetaUV, phiUV matrix
"""
return UV2thetaUVphiUV(self.M['_col_values'],self.M['_row_values'],deg=deg)
def cart3d(self) :
"""returns the x,y,z matrices
"""
import numpy as np
theta,phi=UV2thetaUVphiUV(self.M['_col_values'],self.M['_row_values'],deg=False)
return np.cos(phi)*np.sin(theta),np.sin(phi)*np.sin(theta),np.cos(theta)
def recompose_header(self,*arg,**karg) :
"keywords: inhdr header in input"
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
hdr=None
if karg.has_key('inhdr') :
hdr=copy.deepcopy(karg['inhdr'])
if hdr == None :
hdr=copy.deepcopy(self.info['header'])
if len(arg)>0 :
if type(arg[0]) == type('') :
hdr.append(arg[0])
else :
for k in arg[0] :
hdr.append(k)
hdr.append('In the lines after the header marker defined by 4 "+" characters')
hdr.append('line 1 : ktype')
hdr.append('line 2 : nset icomp ncomp igrid')
hdr.append('line 3 : ix iy')
hdr.append('line 4 : xs ys xe ye')
hdr.append('line 5 : nx ny klimit')
hdr.append(self.end_of_header_marker())
ll=''
ll+=fmtI%int(self.info['ktype'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['nset'])
ll+=fmtI%int(self.info['icomp'])
ll+=fmtI%int(self.info['ncomp'])
ll+=fmtI%int(self.info['igrid'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['ix'])
ll+=fmtI%int(self.info['iy'])
hdr.append(ll.upper())
ll=''
ll+=fmtF%float(self.info['xs'])
ll+=fmtF%float(self.info['ys'])
ll+=fmtF%float(self.info['xe'])
ll+=fmtF%float(self.info['ye'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['nx'])
ll+=fmtI%int(self.info['ny'])
ll+=fmtI%int(self.info['klimit'])
hdr.append(ll.upper())
return hdr
def recompose_map(self,*arg,**karg) :
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if len(arg) > 0 :
lst=copy.deepcopy(arg[0])
else :
lst=[]
for r in range(self.R['n']) :
for c in range(self.C['n']) :
ll = fmtF%(self.M['r1'][r,c])
ll += fmtF%(self.M['i1'][r,c])
ll += fmtF%(self.M['r2'][r,c])
ll += fmtF%(self.M['i2'][r,c])
lst.append(ll.upper())
return lst
def FourColumnsPower(self,power1Name='p1',power2Name='p2',powerName='power') :
"a FourColumns map has r1=sqrt(p1), i1=0, r2=sqrt(p2), i2=0"
new=self.copy()
new.info['ktype']=1
if self.M.has_key(power1Name) and self.M.has_key(power2Name) :
new.info['ncomp']=2
new.M['r1']=self[power1Name]**0.5
new.M['r2']=self[power2Name]**0.5
new.M['i1']=self[power1Name]*0
new.M['i2']=self[power1Name]*0
elif self.M.has_key(powerName) :
new.info['ncomp']=1
new.M['r1']=self[powerName]**0.5
new.M['r2']=self[powerName]*0
new.M['i1']=self[powerName]*0
new.M['i2']=self[powerName]*0
else :
print "the map shall contain ",power1Name,power2Name," or ",powerName
return
return new
def ipix2longcolat(self,nside,ipix,nest=False,deg=True) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return _long*180./np.pi,colat*180./np.pi
return _long,colat
def nside2ipix(self,nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
return nside2ipix(nside,Reversed=Reversed)
def healpix(self,nside,mapname='power',nest=False,Reversed=False,colatrange=None,returnAll=False,usePeriodicalInterpolator=True) :
"""converts to healpix or a stack of healpix maps of given nside
colatrange=None , takes all the map
colatrange=']a,b['
colatrange='[a,b['
colatrange=']a,b]'
"""
import numpy as np
import healpy as H
if colatrange==None :
ipix=self.nside2ipix(nside,Reversed=Reversed)
phiUV,thetaUV = self.ipix2longcolat(nside,ipix,deg=False)
else :
fact=180./np.pi
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
NPEQ=12*nside/2
print left,right
ipixmin=H.ang2pix(nside,left[1]/fact,0)-NPEQ
ipixmax=H.ang2pix(nside,right[1]/fact,0)+NPEQ
if ipixmin < 0 : ipixmin=0
if ipixmax > 12*nside**2-1 : ipixmax=12*nside**2
ipix = np.arange(ipixmin,ipixmax)
colat,Long = H.pix2ang(nside,ipix)
fl=np.ones(len(colat))
if left[0] == ']' :
fl*=(left[1]/fact)<colat
else :
fl*=(left[1]/fact)<=colat
if right[1] == '[' :
fl*=colat<(right[1]/fact)
else :
fl*=colat<=(right[1]/fact)
idx=np.where(fl)[0]
ipix=ipix[idx]
thetaUV=colat[idx]
phiUV=Long[idx]
fl=None
idx=None
colat=None
Long=None
U,V = thetaUVphiUV2UV(thetaUV,phiUV,deg=False)
r1=self.bilinearXY(mapname,U,V)
if returnAll : return r1,ipix,U,V,thetaUV,phiUV
return r1,ipix
def healpix_pixelArea(self,nside) :
import numpy as np
        return 4.*np.pi/(12.*float(nside)**2)
def healpix_integral(self,nside,mapname,Reversed=False,colatrange=None) :
import numpy as np
h=self.healpix(nside,mapname=mapname,nest=False,Reversed=Reversed,colatrange=colatrange,returnAll=False)
        pixelArea=4*np.pi/(12.*float(nside)**2)
        return np.sort(h[0]).sum()*pixelArea
def maximumRadius(self) :
"returns the largest possible radius for an inscribed circle"
a=self.C['v'].ptp()/2.
b=self.R['v'].ptp()/2.
return a if a < b else b
def circularMask(self,*arg) :
"returns a mask for the largest possible circle inscribed in the map"
mask = np.array(self.radius()<=self.maximumRadius(),dtype='int')
if len(arg) == 0 : return mask
try :
self.M[arg]*=mask
except :
print arg," not a valid name"
def unitPixelArea(self) :
import numpy as np
return self.R['delta']*self.C['delta']
def radialIntegral(self,arg,method='planar,simpson',returnJustIntegral=False,asStruct=False,nRadii=51) :
"""
returns a radial integral from 0 up to the maximum possible radius divided in nRadii steps
        integration method given by the "method" keyword, default 'planar,simpson'
raster,planar,trapezoidal :
raster over the rows of the grd map forcing to zero any sample outside the wanted ring
planar,trapezoidal :
uses direct trapezoidal integration
planar,simpson :
uses direct simpson integration
        some tests show that the difference between planar,simpson and planar,trapezoidal is orders of magnitude larger
than the difference between planar,trapezoidal and raster,planar,trapezoidal
so default is planar,simpson
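
        A minimal usage sketch (illustrative names; ``gm`` stands for a map
        instance holding a 'power' component):
        radii, dI, I, method = gm.radialIntegral('power', method='planar,simpson')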
"""
import numpy as np
if returnJustIntegral :
mm=self.circularMask()*self[arg] if type(arg) == type('') else self.circularMask()*arg
            return self.simpson2d(mm) if method=='planar,simpson' else self.trapz2d(mm)
oradius=np.arange(nRadii)/float(nRadii-1)*self.maximumRadius()
oradius[nRadii-1]=self.maximumRadius()
_r=self.radius()
mm=self[arg] if type(arg) == type('') else arg
Itheta=np.zeros(len(oradius))
#
if method=='raster,planar,trapezoidal' :
for j in range(len(oradius)) :
u=mm*1
u[np.where(_r>oradius[j])]=0.
acc=np.zeros(u.shape[0])
for r in range(u.shape[0]) :
x=u[r,1:]
acc[r]=((x[1:]+x[0:-1])/2.).sum()
Itheta[j]=(acc[1:]+acc[0:-1]).sum()/2.
Itheta*=self.unitPixelArea()
#
elif method=='planar,trapezoidal' :
for j in range(len(oradius)) :
Itheta[j]=self.trapz2d(mm,outerCut=oradius[j])
#
elif method=='planar,simpson' :
for j in range(len(oradius)) :
Itheta[j]=self.simpson2d(mm,outerCut=oradius[j])
#
else :
print "Unknown integration method %s"%method
return None
#
        if asStruct : return {'colat':(oradius[1:]+oradius[0:-1])/2.,'dIdcolat':Itheta[1:]-Itheta[0:-1],'Icolat':Itheta,'method':method} # colat = bin centres
return oradius,Itheta[1:]-Itheta[0:-1],Itheta,method
if __name__=='__main__' :
    import numpy as np
    import sys
print "load"
h={}
h['30']=GraspMap('mb/FB_LFI27_SWE_X_FM_1-0.cut',12)
h['27']=GraspMap('outband/FB_LFI27_SWE_X_FM_1-0_27.cut',0)
h['33']=GraspMap('outband/FB_LFI27_SWE_X_FM_1-0_33.cut',0)
lambda0=1/30.
lambda1=1/27.
WL=np.array([1/27.,1/30.])
deltaWL=WL[1]-WL[0]
print "parameters"
PARS={}
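    # Interpolate each beam component linearly in 1/frequency, anchored at the
    # 27 GHz map: comp(x) ~ A + (B - A)*(x - 1/27.)/(1/30. - 1/27.), with x = 1/nu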
for k in ['r1','i1','r2','i2'] :
A=h['27'].M[k]*1
B=h['30'].M[k]*1
A.shape=A.shape[0]*A.shape[1]
B.shape=B.shape[0]*B.shape[1]
# PARS[k]=np.polyfit(WL,np.array([A,B]),1)
PARS[k]=np.array([(B-A)/deltaWL,A])
C=h['33'].M['power']*1
C.shape=C.shape[0]*C.shape[1]
print "lambda interpolate"
ipt = {}
for nu in [27,28,29,30,31,32,33] :
ipt[str(nu)] ={}
for k in ['r1','i1','r2','i2'] :
#ipt[str(nu)][k]=np.polyval(PARS[k],1/float(nu))
ipt[str(nu)][k]=PARS[k][0]*(1/float(nu)-WL[0])+PARS[k][1]
ipt[str(nu)]['power']=ipt[str(nu)]['r1']**2+ipt[str(nu)]['i1']**2+ipt[str(nu)]['r2']**2+ipt[str(nu)]['i2']**2
print nu,1/float(nu),10*np.log10(ipt[str(nu)]['power'].max())
print 10*np.log10(C.max())
R=h['27'].M['power']*1 ; R.shape=R.shape[0]*R.shape[1]
G=h['30'].M['power']*1 ; G.shape=G.shape[0]*G.shape[1]
B=h['33'].M['power']*1 ; B.shape=B.shape[0]*B.shape[1]
I=np.arange(len(R))*3
sys.exit()
from matplotlib import pyplot as plt
plt.close('all')
plt.figure()
plt.plot(I-3./(33.-27.),10*np.log10(R),'r.')
plt.plot(I+0./(33.-27.),10*np.log10(G),'g.')
plt.plot(I+3./(33.-27.),10*np.log10(B),'b.')
plt.show()
|
gpl-2.0
|
elijah513/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
204
|
4452
|
"""
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators and whether the input data should be
# centered before fitting them.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
|
bsd-3-clause
|
maminian/skewtools
|
scripts/animate_particles_2d_labframe_v2.py
|
1
|
1976
|
from numpy import *
from matplotlib import pyplot
import scripts.skewtools as st
import sys
X,Y,t,Pe = st.importDatasets(sys.argv[1],'X','Y','Time','Peclet')
figscale = 5.
fig,ax = pyplot.subplots(2,1,figsize=(4*figscale,1.5*figscale))
orig0 = ax[0].get_position()
orig1 = ax[1].get_position()
ax[0].set_position([orig0.x0,orig0.y0-0.2,orig0.width,orig0.height+0.2])
ax[1].set_position([orig1.x0,orig1.y0,orig1.width,orig1.height-0.2])
np = shape(X)[0]  # number of particles; note numpy was star-imported, so 'np' is not the numpy module here
uwall = 2./3.
xmax = X.max()
nabsorbed = zeros(shape(t))
for i in range(len(t)):
#for i in [12]:
ax[0].cla()
# ax.hold(True)
ax[0].plot([0,xmax],[1,1],linewidth=0.5,color='k')
ax[0].plot([0,xmax],[-1,-1],linewidth=0.5,color='k')
subset1 = ((Y[:,i]<1.)*(Y[:,i] > -1.))
subset2 = ~subset1
subset2a = (Y[:,i]>1.)
subset2b = (Y[:,i]<-1.)
nbinsx = 401
nbinsy = 101
vmaxval = np/nbinsx
# ax[0].scatter(X[subset2,i],Y[subset2,i],facecolor=[1,0,0],edgecolor=[0,0,0,0],s=1,alpha=0.1)
ax[0].hist2d(X[subset2a,i],Y[subset2a,i],cmap=pyplot.cm.Reds,bins=[linspace(0,xmax,nbinsx),linspace(1,1.05,2)],vmin=0,vmax=vmaxval)
ax[0].hist2d(X[subset2b,i],Y[subset2b,i],cmap=pyplot.cm.Reds,bins=[linspace(0,xmax,nbinsx),linspace(-1.05,-1,2)],vmin=0,vmax=np/nbinsx)
ax[0].scatter(X[subset1,i],Y[subset1,i],facecolor=[1,0,0,0.2],edgecolor='none',s=1)
# ax[0].hist2d(X[subset1,i],Y[subset1,i],cmap=pyplot.cm.inferno,bins=[linspace(0,xmax,nbinsx),linspace(-1,1,nbinsy)])
# ax.hold(False)
ax[0].set_xlim([0.,xmax])
ax[0].set_ylim([-1.05,1.05])
ax[1].cla()
ax[1].hist(X[subset2,i],bins=linspace(0.,xmax,nbinsx),facecolor=[0,0,0,0],edgecolor='k')
ax[1].set_xlim([0.,xmax])
ax[1].set_ylim([0.,4*np/nbinsx])
pyplot.savefig('particleframe_'+str(i).zfill(4)+'.png',dpi=80,bbox_inches='tight')
print '%i active particles, %i of %i frames'%(sum(subset1),i,len(t)-1)
# end for
#pyplot.tight_layout()
|
gpl-3.0
|
eramirem/astroML
|
book_figures/chapter10/fig_FFT_aliasing.py
|
3
|
5126
|
"""
The effect of Sampling
----------------------
Figure 10.3
A visualization of aliasing in the Fourier transform. In each set of four
panels, the top-left panel shows a signal and a regular sampling function, the
top-right panel shows the Fourier transform of the signal and sampling
function, the bottom-left panel shows the sampled data, and the bottom-right
panel shows the convolution of the Fourier-space representations (cf. figure
10.2). In the top four panels, the data is well sampled, and there is little
to no aliasing. In the bottom panels, the data is not well sampled (the spacing
between two data points is larger) which leads to aliasing, as seen in the
overlap of the convolved Fourier transforms (figure adapted from Greg05).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def gaussian(x, a=1.0):
return np.exp(-0.5 * (x / a) ** 2)
def gaussian_FT(f, a=1.0):
return np.sqrt(2 * np.pi * a ** 2) * np.exp(-2 * (np.pi * a * f) ** 2)
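# gaussian() and gaussian_FT() form a Fourier pair under the convention
# H(f) = integral h(t) exp(-2 pi i f t) dt:
#   exp(-t**2 / (2 a**2))  <-->  sqrt(2 pi a**2) * exp(-2 pi**2 a**2 f**2)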
#------------------------------------------------------------
# Define our terms
a = 1.0
t = np.linspace(-5, 5, 1000)
h = gaussian(t, a)
f = np.linspace(-2, 2, 1000)
H = gaussian_FT(f, a)
#------------------------------------------------------------
# Two plots: one well-sampled, one undersampled
N = 12
for dt in (0.9, 1.5):
# define time-space sampling
t_sample = dt * (np.arange(N) - N / 2)
h_sample = gaussian(t_sample, a)
# Fourier transform of time-space sampling
df = 1. / dt
f_sample = df * (np.arange(N) - N / 2)
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.07, right=0.95, wspace=0.16,
bottom=0.1, top=0.85, hspace=0.05)
# First plot: sampled time-series
ax = fig.add_subplot(221)
ax.plot(t, h, '-k')
for ts in t_sample:
ax.annotate('', (ts, 0.5), (ts, 0), ha='center', va='center',
arrowprops=dict(arrowstyle='->'))
ax.text(0.03, 0.95,
("Signal and Sampling Window\n" +
r"Sampling Rate $\Delta t$"),
ha='left', va='top', transform=ax.transAxes)
ax.set_ylabel('$h(t)$')
ax.set_xlim(-5, 5)
ax.set_ylim(0, 1.4)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_title('Time Domain: Multiplication')
# second plot: frequency space
ax = fig.add_subplot(222)
ax.plot(f, H, '-k')
for fs in f_sample:
ax.annotate('', (fs, 1.5), (fs, 0), ha='center', va='center',
arrowprops=dict(arrowstyle='->'))
ax.text(0.03, 0.95,
("FT of Signal and Sampling Window\n" +
r"$\Delta f = 1 / \Delta t$"),
ha='left', va='top', transform=ax.transAxes)
ax.set_ylabel('$H(f)$')
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(0, 3.8)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_title('Frequency Domain: Convolution')
# third plot: windowed function
ax = fig.add_subplot(223)
for (ts, hs) in zip(t_sample, h_sample):
if hs < 0.1:
continue
ax.annotate('', (ts, hs), (ts, 0), ha='center', va='center',
arrowprops=dict(arrowstyle='->'))
ax.plot(t, h, ':k')
ax.text(0.03, 0.95, "Sampled signal: pointwise\nmultiplication",
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$t$')
ax.set_ylabel('$h(t)$')
ax.set_xlim(-5, 5)
ax.set_ylim(0, 1.4)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
# fourth plot: convolved PSD
ax = fig.add_subplot(224)
window = np.array([gaussian_FT(f - fs, a) for fs in f_sample])
ax.plot(f, window.sum(0), '-k')
if dt > 1:
ax.plot(f, window.T, ':k')
ax.text(0.03, 0.95, "Convolution of signal FT\nand window FT",
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$f$')
ax.set_ylabel('$H(f)$')
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(0, 3.8)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
if dt > 1:
fig.suptitle(r"Undersampled data: $\Delta t > t_c$")
else:
fig.suptitle(r"Well-sampled data: $\Delta t < t_c$")
plt.show()
|
bsd-2-clause
|
amandalund/openmc
|
tests/regression_tests/mgxs_library_no_nuclides/test.py
|
6
|
2709
|
import hashlib
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
# Generate inputs using parent class routine
super().__init__(*args, **kwargs)
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = False
# Test relevant MGXS types
relevant_MGXS_TYPES = [item for item in openmc.mgxs.MGXS_TYPES
if item != 'current']
# Add in a subset of openmc.mgxs.ARBITRARY_VECTOR_TYPES and
# openmc.mgxs.ARBITRARY_MATRIX_TYPES so we can see the code works,
        # without using too many resources
relevant_MGXS_TYPES += [
"(n,elastic)", "(n,level)", "(n,2n)", "(n,na)", "(n,nc)",
"(n,gamma)", "(n,a)", "(n,Xa)", "heating", "damage-energy",
"(n,n1)", "(n,a0)", "(n,nc) matrix", "(n,n1) matrix",
"(n,2n) matrix"]
self.mgxs_lib.mgxs_types = tuple(relevant_MGXS_TYPES) + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += mgxs_type + '\n' + df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_mgxs_library_no_nuclides():
model = pwr_pin_cell()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
|
mit
|
martin-sicho/MI_ADM
|
compound_db_utils/data_loaders.py
|
1
|
2695
|
import math
import pandas
from rdkit.Chem import PandasTools, Descriptors, MolFromSmiles
from sqlalchemy.orm import sessionmaker
import compound_db_utils.database as db
from compound_db_utils import settings
def _gather_columns(table, col_list):
columns = []
for col in col_list:
columns.append(getattr(table.c, col))
return columns
def fetch_learning_data(
datasets
, datasets_cols = ()
, bioacitivities_cols = (
'value',
)
, compute_descriptors = False
, create_rdkit_mols = False
, col_names_map = ()
, duplicates_handler = None
):
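    """Query bioactivity values joined with compound SMILES and return them
    as a pandas DataFrame.

    Note (inferred from the code below): ``duplicates_handler``, if given, is
    called as ``duplicates_handler(smiles, duplicate_values)`` for every
    duplicated SMILES and should return either ``False`` (drop all rows for
    that SMILES) or a single value to keep as the merged activity.
    """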
DB_CONNECTION, TB_COMPOUNDS, TB_DATASETS, TB_BIOACTIVITIES = db.fetch_all()
session = sessionmaker(bind=DB_CONNECTION)()
cols = _gather_columns(TB_BIOACTIVITIES, bioacitivities_cols)
cols.extend(_gather_columns(TB_DATASETS, datasets_cols))
cols.append(TB_COMPOUNDS.c.smiles)
query = session.query(
*cols
).join(TB_COMPOUNDS).join(TB_DATASETS)\
.filter(
TB_DATASETS.c.unique_id.in_(datasets)
)
# make the DB query and export the data to pandas DataFrame object
data = pandas.read_sql_query(query.selectable, DB_CONNECTION)
smiles_col_name = settings.COMPOUNDS_TABLE + '_smiles'
ic50_col_name = settings.BIOACTIVITIES_TABLE + '_value'
# remove duplicate values
if duplicates_handler:
duplicates = set(data[smiles_col_name][data[smiles_col_name].duplicated()])
for smiles in duplicates:
duplicate_ic50s = data[data[smiles_col_name] == smiles][ic50_col_name]
ret = duplicates_handler(smiles, duplicate_ic50s)
data = data[data[smiles_col_name] != smiles]
            if not isinstance(ret, bool) and ret != False:
data.update(
pandas.DataFrame(
[[smiles, ret]]
, columns = [smiles_col_name, ic50_col_name]
)
)
if compute_descriptors:
desc_list = Descriptors.descList
        try:
            # compute_descriptors may be an iterable of descriptor names to keep
            desc_list = [x for x in desc_list if x[0] in compute_descriptors]
        except TypeError:
            # compute_descriptors was passed as a plain truthy flag: keep all descriptors
            pass
        for desc_name, function in desc_list:
            values = []
            for smiles in data[smiles_col_name]:
                mol = MolFromSmiles(smiles)
                values.append(function(mol))
            data[desc_name] = values
if create_rdkit_mols:
PandasTools.AddMoleculeColumnToFrame(
data
, smiles_col_name
, 'rdmol'
)
if col_names_map:
data.rename(columns=col_names_map, inplace=True)
return data
|
gpl-3.0
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/metrics/cluster/bicluster.py
|
359
|
2797
|
from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
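
    Examples
    --------
    A small illustrative case: two identical sets, each containing a single
    bicluster, match perfectly.

    >>> import numpy as np
    >>> from sklearn.metrics import consensus_score
    >>> rows = np.array([[True, True, False, False]])
    >>> cols = np.array([[True, False, True, False]])
    >>> consensus_score((rows, cols), (rows, cols))
    1.0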
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
|
mit
|
Bismarrck/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py
|
136
|
1696
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
petercable/xray
|
xray/core/combine.py
|
1
|
15637
|
import warnings
import pandas as pd
from . import utils
from .pycompat import iteritems, reduce, OrderedDict, basestring
from .variable import Variable
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
            in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
auto_combine
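
    Examples
    --------
    A minimal sketch with illustrative in-memory datasets:

    >>> import xray
    >>> ds1 = xray.Dataset({'temperature': ('time', [10.0, 11.0])})
    >>> ds2 = xray.Dataset({'temperature': ('time', [12.0, 13.0])})
    >>> combined = xray.concat([ds1, ds2], dim='time')  # 'temperature' now spans 4 times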
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
    # TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
dim = 'concat_dims'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xray Dataset and DataArray '
'objects')
return f(objs, dim, data_vars, coords, compat, positions)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
from .dataarray import DataArray
if isinstance(dim, basestring):
coord = None
elif not hasattr(dim, 'dims'):
# dim is not a DataArray or Coordinate
dim_name = getattr(dim, 'name', None)
if dim_name is None:
dim_name = 'concat_dim'
coord = DataArray(dim, dims=dim_name, name=dim_name)
dim = dim_name
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
def process_subset_opt(opt, subset):
if subset == 'coords':
subset_long_name = 'coordinates'
else:
subset_long_name = 'data variables'
if isinstance(opt, basestring):
if opt == 'different':
def differs(vname):
# simple helper function which compares a variable
# across all datasets and indicates whether that
# variable differs or not.
v = datasets[0].variables[vname]
return any(not ds.variables[vname].equals(v)
for ds in datasets[1:])
# all nonindexes that are not the same in each dataset
concat_new = set(k for k in getattr(datasets[0], subset)
if k not in concat_over and differs(k))
elif opt == 'all':
concat_new = (set(getattr(datasets[0], subset)) -
set(datasets[0].dims))
elif opt == 'minimal':
concat_new = set()
else:
raise ValueError("unexpected value for concat_%s: %s"
% (subset, opt))
else:
invalid_vars = [k for k in opt
if k not in getattr(datasets[0], subset)]
if invalid_vars:
raise ValueError('some variables in %s are not '
'%s on the first dataset: %s'
% (subset, subset_long_name, invalid_vars))
concat_new = set(opt)
return concat_new
concat_over = set()
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items()
if dim in v.dims)
concat_over.update(process_subset_opt(data_vars, 'data_vars'))
concat_over.update(process_subset_opt(coords, 'coords'))
if dim in datasets[0]:
concat_over.add(dim)
return concat_over
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset, as_dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
# don't bother trying to work with datasets as a generator instead of a
# list; the gains would be minimal
datasets = [as_dataset(ds) for ds in datasets]
dim, coord = _calc_concat_dim_coord(dim)
concat_over = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
# Dataset({}, attrs=datasets[0].attrs)
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in iteritems(ds.variables):
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif (k in result_vars and k != dim and
not getattr(v, compat)(result_vars[k])):
verb = 'equal' if compat == 'equals' else compat
raise ValueError(
'variable %r not %s across datasets' % (k, verb))
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.expand_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset
for k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = Variable.concat(vars, dim, positions)
insert_result_variable(k, combined)
# result._coord_names.update(datasets[0].coords)
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
insert_result_variable(coord.name, coord)
# result[coord.name] = coord
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
positions):
from .dataarray import DataArray
if data_vars != 'all':
raise ValueError('data_vars is not a valid argument when '
'concatenating DataArray objects')
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == 'identical':
raise ValueError('array names not identical')
else:
arr = arr.rename(name)
datasets.append(arr._dataset)
ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
positions)
return DataArray._new_from_dataset_no_copy(ds, name)
def _auto_concat(datasets, dim=None):
if len(datasets) == 1:
return datasets[0]
else:
if dim is None:
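            # Infer the dimension to concatenate along: start from all of the
            # first dataset's dimensions, narrow to those whose sizes differ
            # between the first two datasets, and finally drop dimensions whose
            # coordinate values already agree.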
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = set(i for i, _ in dim_tuples)
if len(concat_dims) > 1:
concat_dims = set(d for d in concat_dims
if not ds0[d].equals(ds1[d]))
if len(concat_dims) > 1:
raise ValueError('too many different dimensions to '
'concatenate: %s' % concat_dims)
elif len(concat_dims) == 0:
raise ValueError('cannot infer dimension to concatenate: '
'supply the ``concat_dim`` argument '
'explicitly')
dim, = concat_dims
return concat(datasets, dim=dim)
def auto_combine(datasets, concat_dim=None):
"""Attempt to auto-magically combine the given datasets into one.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or align or sort data
under any circumstances. It will fail in complex cases, for which you
should use ``concat`` and ``merge`` explicitly.
When ``auto_combine`` may succeed:
* You have N years of data and M data variables. Each combination of a
      distinct time period and set of data variables is saved as its own dataset.
Examples of when ``auto_combine`` fails:
    * In the above scenario, one file is missing, containing the data for one
      year of one variable.
* In the most recent year, there is an additional data variable.
* Your data includes "time" and "station" dimensions, and each year's data
has a different set of stations.
Parameters
----------
datasets : sequence of xray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xray.concat`. You only need to provide this argument if the
dimension along which you want to concatenate is not a dimension in
the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
Returns
-------
combined : xray.Dataset
See also
--------
concat
Dataset.merge
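
    Examples
    --------
    A minimal sketch (``ds_2000`` and ``ds_2001`` stand for hypothetical
    per-year datasets that share the same data variables):

    >>> combined = auto_combine([ds_2000, ds_2001], concat_dim='time')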
"""
from toolz import itertoolz
grouped = itertoolz.groupby(lambda ds: tuple(sorted(ds.data_vars)),
datasets).values()
concatenated = [_auto_concat(ds, dim=concat_dim) for ds in grouped]
merged = reduce(lambda ds, other: ds.merge(other), concatenated)
return merged
|
apache-2.0
|
geodynamics/burnman
|
examples/example_seismic_travel_times.py
|
2
|
9054
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
This example script produces an input files for travel time calculations in ObsPy, either by replacing one layer with a composition defined in BurnMan (see example_layer) or with an entire planet (see example_planet). This example shows how to plot predicted travel times and ray paths and plots those in a PREM earth for reference.
Requires Obspy, see www.obspy.org and
L. Krischer, T. Megies, R. Barsch, M. Beyreuther, T. Lecocq, C. Caudron, J. Wassermann (2015)
ObsPy: a bridge for seismology into the scientific Python ecosystem
Computational Science & Discovery, 8(1), 014003
DOI: 10.1088/1749-4699/8/1/014003
To find out more about the specific routines in this example see
https://docs.obspy.org/packages/obspy.taup.html
"""
# Import supporting libraries
# Imports to be compatible with Python2 and Python3
from __future__ import absolute_import
from __future__ import print_function
import numpy as np # Library used for general array
import matplotlib.pyplot as plt # Library used for plotting
# Import BurnMan
import burnman_path # adds the local burnman directory to the path
import burnman
from burnman import minerals # import mineral library separately
# This example relies heavily on the ObsPy, a python seismology toolkit
import obspy
from obspy.taup import TauPyModel
assert burnman_path # silence pyflakes warning
def plot_rays_and_times(modelname):
"""
Calls obspy routines to plot ray paths and travel times for the model built in BurnMan and for a seismic reference model for comparison.
Parameters
----------
modelname:
Name for BurnMan model (*.tvel file must be present)
"""
# Arrivals to plot, some random examples of phase names to use ("P","S", "PcP", "Sdiff", "SKS", "PKIKP")
# Phase naming in obspy.taup is explained at
# https://docs.obspy.org/packages/obspy.taup.html
phase_list = ["P", "PKP", "PKIKP"]
source_depth = 10 # in km
min_degrees = 60 # minimum distance for ray paths
max_degrees = 300 # maximum distance for ray paths
npoints = 9 # number of distances to plot ray paths
    ref_model = 'prem' # choice of models available in obspy, or input an npz file name
# Build a taup_model for Obspy
obspy.taup.taup_create.build_taup_model(
"./" + modelname + ".tvel", ".")
# Time to plot some predictions using routines from Obspy
plt.figure(figsize=[9, 7])
ax = plt.subplot(2, 2, 1)
# plotting predicted travel times at all distances
obspy.taup.plot_travel_times(
ax=ax,
model='./' +
modelname +
'.npz',
source_depth=source_depth,
phase_list=phase_list,
show=False)
plt.title(modelname)
# plotting the same for PREM for reference
ax = plt.subplot(2, 2, 2)
obspy.taup.plot_travel_times(
ax=ax,
model=ref_model,
source_depth=source_depth,
phase_list=phase_list,
show=False)
    # not sure why the grid disappears on this subplot, reactivate here...
ax.grid()
plt.title(ref_model)
# plotting predicted ray paths every 30 degrees between 60 and 300
# degrees
ax = plt.subplot(2, 2, 3, polar=True)
obspy.taup.plot_ray_paths(
ax=ax,
model='./' +
modelname +
'.npz',
source_depth=source_depth,
min_degrees=min_degrees,
max_degrees=max_degrees,
npoints=npoints,
phase_list=phase_list,
verbose=True,
show=False)
# plotting the same for PREM for reference
ax = plt.subplot(2, 2, 4, polar=True)
obspy.taup.plot_ray_paths(
ax=ax,
model=ref_model,
source_depth=source_depth,
min_degrees=min_degrees,
max_degrees=max_degrees,
npoints=npoints,
phase_list=phase_list,
verbose=True)
if __name__ == "__main__":
# Two examples available
example_layer = True
example_planet = True
# First example: replacing the lower mantle with a composition from BurnMan
if example_layer:
modelname = 'perovskitic_mantle'
# This is the first actual work done in this example. We define
# composite object and name it "rock".
mg_fe_perovskite = minerals.SLB_2011.mg_fe_perovskite()
mg_fe_perovskite.set_composition(
[0.9, 0.1, 0]) # frac_mg, frac_fe, frac_al
rock = burnman.Composite([mg_fe_perovskite], [1.])
# We create an array of 20 depths at which we want to evaluate the
# layer at
depths = np.linspace(2890e3, 670e3, 20)
# Here we define the lower mantle as a Layer(). The layer needs various
# parameters to set a depth array and radius array.
lower_mantle = burnman.Layer(
name='Perovskitic Lower Mantle',
radii=6371.e3 - depths)
# Here we set the composition of the layer as the above defined 'rock'.
lower_mantle.set_material(rock)
# Now we set the temperature mode of the layer.
# Here we use an adiabatic temperature and set the temperature at the
# top of the layer
lower_mantle.set_temperature_mode(
temperature_mode='adiabatic',
temperature_top=1900.)
# And we set a self-consistent pressure. The pressure at the top of the layer and
# gravity at the bottom of the layer are given by the PREM.
pressure, gravity = burnman.seismic.PREM().evaluate(
['pressure', 'gravity'], depths)
lower_mantle.set_pressure_mode(pressure_mode='self-consistent',
pressure_top=pressure[-1], gravity_bottom=gravity[0])
lower_mantle.make()
# Constructing the tvel file for obspy. Here we use PREM to fill in the
# rest of the planet
burnman.output_seismo.write_tvel_file(
lower_mantle,
filename=modelname+'tvel',
background_model=burnman.seismic.PREM())
# Plot ray paths and travel times
plot_rays_and_times(modelname)
# Second example implementing an entire planet
if example_planet:
modelname = 'planetzog'
# A layer is defined by 4 parameters: Name, min_depth, max_depth,and number of slices within the layer.
# Separately the composition and the temperature_mode need to set.
# Radii of different layers
radius_planet = 6371.e3
radius_ic = 1220.e3
radius_oc = 3580.e3
radius_lm = 5711.e3
# inner_core
inner_core = burnman.Layer(
'inner core', radii=np.linspace(
0., radius_ic, 10))
inner_core.set_material(burnman.minerals.other.Fe_Dewaele())
# The minerals that make up our core do not currently implement the
# thermal equation of state, so we will set the temperature at 300 K.
inner_core.set_temperature_mode('user-defined',
300. * np.ones_like(inner_core.radii))
# outer_core
outer_core = burnman.Layer(
'outer core', radii=np.linspace(
radius_ic, radius_oc, 10))
outer_core.set_material(burnman.minerals.other.Liquid_Fe_Anderson())
# The minerals that make up our core do not currently implement the
# thermal equation of state, so we will define the temperature at 300
# K.
outer_core.set_temperature_mode(
'user-defined',
300. *
np.ones_like(
outer_core.radii))
# Next the Mantle.
lower_mantle = burnman.Layer(
'lower mantle', radii=np.linspace(
radius_oc, radius_lm, 10))
lower_mantle.set_material(burnman.minerals.SLB_2011.mg_bridgmanite())
lower_mantle.set_temperature_mode('adiabatic')
upper_mantle = burnman.Layer(
'upper mantle', radii=np.linspace(
radius_lm, radius_planet, 10))
upper_mantle.set_material(burnman.minerals.SLB_2011.forsterite())
upper_mantle.set_temperature_mode('adiabatic', temperature_top=1200.)
# Now we calculate the planet.
planet_zog = burnman.Planet(
'Planet Zog', [
inner_core, outer_core, lower_mantle, upper_mantle], verbose=True)
# Here we compute its state. Go BurnMan Go!
# (If we were to change composition of one of the layers, we would have to
# recompute the state)
planet_zog.make()
# Constructing the tvel file for obspy. Here we use PREM to fill in the
# rest of the planet
burnman.output_seismo.write_tvel_file(
planet_zog,
filename=modelname+'tvel',
background_model=burnman.seismic.PREM())
# Plot ray paths and travel times
plot_rays_and_times(modelname)
|
gpl-2.0
|
huzq/scikit-learn
|
examples/ensemble/plot_voting_decision_regions.py
|
23
|
2610
|
"""
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
.. currentmodule:: sklearn
Plot the decision boundaries of a :class:`~ensemble.VotingClassifier` for two
features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset predicted by
three different classifiers and averaged by the
:class:`~ensemble.VotingClassifier`.
First, three exemplary classifiers are initialized
(:class:`~tree.DecisionTreeClassifier`,
:class:`~neighbors.KNeighborsClassifier`, and :class:`~svm.SVC`) and used to
initialize a soft-voting :class:`~ensemble.VotingClassifier` with weights `[2,
1, 2]`, which means that the predicted probabilities of the
:class:`~tree.DecisionTreeClassifier` and :class:`~svm.SVC` each count twice
as much as the predictions of the :class:`~neighbors.KNeighborsClassifier`
when the averaged probability is calculated.
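
For example, with illustrative (not computed) class-1 probabilities of 0.9,
0.3 and 0.6 from the decision tree, the k-nearest-neighbours classifier and
the SVC, the soft-voting probability is
(2*0.9 + 1*0.3 + 2*0.6) / (2 + 1 + 2) = 0.66.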
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(gamma=.1, kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y,
s=20, edgecolor='k')
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
|
bsd-3-clause
|
beiko-lab/gengis
|
bin/Lib/site-packages/mpl_toolkits/axisartist/axislines.py
|
6
|
25977
|
"""
Axislines includes modified implementation of the Axes class. The
biggest difference is that the artists responsible to draw axis line,
ticks, ticklabel and axis labels are separated out from the mpl's Axis
class, which are much more than artists in the original
mpl. Originally, this change was motivated to support curvilinear
grid. Here are a few reasons why I came up with a new axes class.
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* angled ticks.
In the new axes class, xaxis and yaxis are set to not visible by
default, and a new set of artists (AxisArtist) is defined to draw the axis
line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
instance responsible for drawing the left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has the following child artists, which draw ticks, labels, etc.:
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to support them. For example,
the color and markerwidth of ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have a *gridlines* attribute,
which draws the grid lines. The gridlines need to be separated
from the axis as some gridlines can never pass through any axis.
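
A minimal usage sketch (Subplot is the axislines-aware subplot class exported
by this module; everything else is standard matplotlib):

    import matplotlib.pyplot as plt
    from mpl_toolkits.axisartist.axislines import Subplot

    fig = plt.figure()
    ax = Subplot(fig, 111)
    fig.add_subplot(ax)
    ax.axis["right"].set_visible(False)  # hide the right y-axis artist
    ax.axis["top"].set_visible(False)    # hide the top x-axis artist
    plt.show()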
"""
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from axisline_style import AxislineStyle
from axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
"""
AxisArtistHelper should define
    the following methods with the given APIs. Note that the first axes argument
    will be the axes attribute of the caller artist.
# LINE (spinal line?)
def get_line(self, axes):
# path : Path
return path
def get_line_transform(self, axes):
# ...
# trans : transform
return trans
# LABEL
def get_label_pos(self, axes):
# x, y : position
return (x, y), trans
def get_label_offset_transform(self, \
axes,
pad_points, fontprops, renderer,
bboxes,
):
# va : vertical alignment
# ha : horizontal alignment
# a : angle
return trans, va, ha, a
# TICK
def get_tick_transform(self, axes):
return trans
def get_tick_iterators(self, axes):
# iter : iterable object that yields (c, angle, l) where
# c, angle, l is position, tick angle, and label
return iter_major, iter_minor
"""
class _Base(object):
"""
Base class for axis helper.
"""
def __init__(self):
"""
"""
self.delta1, self.delta2 = 0.00001, 0.00001
def update_lim(self, axes):
pass
class Fixed(_Base):
"""
Helper class for a fixed (in the axes coordinate) axis.
"""
_default_passthru_pt = dict(left=(0, 0),
right=(1, 0),
bottom=(0, 0),
top=(0, 1))
def __init__(self,
loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
self._loc = loc
if loc not in ["left", "right", "bottom", "top"]:
raise ValueError("%s" % loc)
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
elif loc in ["bottom", "top"]:
nth_coord = 0
self.nth_coord = nth_coord
super(AxisArtistHelper.Fixed, self).__init__()
self.passthru_pt = self._default_passthru_pt[loc]
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-nth_coord
_verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
# axis line in transAxes
self._path = Path(_verts)
def get_nth_coord(self):
return self.nth_coord
# LINE
def get_line(self, axes):
return self._path
def get_line_transform(self, axes):
return axes.transAxes
# LABEL
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._loc
pos, angle_tangent = dict(left=((0., 0.5), 90),
right=((1., 0.5), 90),
bottom=((0.5, 0.), 0),
top=((0.5, 1.), 0))[loc]
return pos, angle_tangent
# TICK
def get_tick_transform(self, axes):
trans_tick = [axes.get_xaxis_transform(),
axes.get_yaxis_transform()][self.nth_coord]
return trans_tick
class Floating(_Base):
def __init__(self, nth_coord,
value):
self.nth_coord = nth_coord
self._value = value
super(AxisArtistHelper.Floating,
self).__init__()
def get_nth_coord(self):
return self.nth_coord
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear:
class Fixed(AxisArtistHelper.Fixed):
def __init__(self,
axes, loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
loc, nth_coord)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._loc
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = list(self.passthru_pt) # copy
c[self.nth_coord] = x
# check if the tick point is inside axes
c2 = tr2ax.transform_point(c)
#delta=0.00001
if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class Floating(AxisArtistHelper.Floating):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super(AxisArtistHelperRectlinear.Floating, self).__init__( \
nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[:,fixed_coord] = p[fixed_coord]
return Path(_verts)
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._axis_direction
#angle = dict(left=0,
# right=0,
# bottom=.5*np.pi,
# top=.5*np.pi)[loc]
if self.nth_coord == 0:
angle = 0
else:
angle = 90
_verts = [0.5, 0.5]
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[fixed_coord] = p[fixed_coord]
if not (0. <= _verts[fixed_coord] <= 1.):
return None, None
else:
return _verts, angle
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._axis_direction
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
if self.nth_coord == 0:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
#angle = 90 - 90 * self.nth_coord
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
tr2ax = axes.transData + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = [self._value, self._value]
c[self.nth_coord] = x
c1, c2 = tr2ax.transform_point(c)
if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
def __init__(self):
self._force_update = True
self._old_limits = None
super(GridHelperBase, self).__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._force_update or self._old_limits != (x1, x2, y1, y2):
self._update(x1, x2, y1, y2)
self._force_update = False
self._old_limits = (x1, x2, y1, y2)
def _update(self, x1, x2, y1, y2):
pass
def invalidate(self):
self._force_update = True
def valid(self):
return not self._force_update
def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
return []
def new_gridlines(self, ax):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = GridlinesCollection(None, transform=ax.transData,
colors=rcParams['grid.color'],
linestyles=rcParams['grid.linestyle'],
linewidths=rcParams['grid.linewidth'])
ax._set_artist_props(gridlines)
gridlines.set_grid_helper(self)
ax.axes._set_artist_props(gridlines)
# gridlines.set_clip_path(self.axes.patch)
# set_clip_path need to be deferred after Axes.cla is completed.
# It is done inside the cla.
return gridlines
class GridHelperRectlinear(GridHelperBase):
def __init__(self, axes):
super(GridHelperRectlinear, self).__init__()
self.axes = axes
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None,
):
if axes is None:
warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
axes = self.axes
_helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
if axis_direction is None:
axis_direction = loc
axisline = AxisArtist(axes, _helper, offset=offset,
axis_direction=axis_direction,
)
return axisline
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
axes=None,
):
if axes is None:
warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
axes = self.axes
passthrough_point = (value, value)
transform = axes.transData
_helper = AxisArtistHelperRectlinear.Floating( \
axes, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
def get_gridlines(self, which="major", axis="both"):
"""
return list of gridline coordinates in data coordinates.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = []
if axis in ["both", "x"]:
locs = []
y1, y2 = self.axes.get_ylim()
#if self.axes.xaxis._gridOnMajor:
if which in ["both", "major"]:
locs.extend(self.axes.xaxis.major.locator())
#if self.axes.xaxis._gridOnMinor:
if which in ["both", "minor"]:
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
if axis in ["both", "y"]:
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._gridOnMajor:
#if which in ["both", "major"]:
locs.extend(self.axes.yaxis.major.locator())
if self.axes.yaxis._gridOnMinor:
#if which in ["both", "minor"]:
locs.extend(self.axes.yaxis.minor.locator())
for y in locs:
gridlines.append([[x1, x2], [y, y]])
return gridlines
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start is None and k.stop is None and k.step is None:
r = SimpleChainedObjects(self.values())
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
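# Illustrative usage sketch (not part of the original module), assuming `ax`
# is an instance of this Axes class:
#
#     ax.axis["bottom"].set_visible(True)           # single AxisArtist
#     ax.axis["top", "right"].set_visible(False)    # tuple key -> chained artists
#     ax.axis[:].major_ticks.set_color("grey")      # full slice -> every artist
#
# A tuple or full-slice key returns a SimpleChainedObjects wrapper, so each
# attribute access and the final call are fanned out to all wrapped artists.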
def __init__(self, *kl, **kw):
helper = kw.pop("grid_helper", None)
self._axisline_on = True
if helper:
self._grid_helper = helper
else:
self._grid_helper = GridHelperRectlinear(self)
super(Axes, self).__init__(*kl, **kw)
self.toggle_axisline(True)
def toggle_axisline(self, b=None):
if b is None:
b = not self._axisline_on
if b:
self._axisline_on = True
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
else:
self._axisline_on = False
for s in self.spines.values():
s.set_visible(True)
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
def _init_axis(self):
super(Axes, self)._init_axis()
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
new_fixed_axis = self.get_grid_helper().new_fixed_axis
for loc in ["bottom", "top", "left", "right"]:
self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
axis_direction=loc)
for axisline in [self._axislines["top"], self._axislines["right"]]:
axisline.label.set_visible(False)
axisline.major_ticklabels.set_visible(False)
axisline.minor_ticklabels.set_visible(False)
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def new_gridlines(self, grid_helper=None):
"""
Create and return a new GridlinesCollection instance.
*grid_helper* : grid helper to use; defaults to this Axes' own grid helper
"""
if grid_helper is None:
grid_helper = self.get_grid_helper()
gridlines = grid_helper.new_gridlines(self)
return gridlines
def _init_gridlines(self, grid_helper=None):
# It is done inside the cla.
gridlines = self.new_gridlines(grid_helper)
self.gridlines = gridlines
def cla(self):
# gridlines need to be created before cla() since cla calls grid()
self._init_gridlines()
super(Axes, self).cla()
# the clip_path should be set after Axes.cla() since that's
# when a patch is created.
self.gridlines.set_clip_path(self.axes.patch)
self._init_axis_artists()
def get_grid_helper(self):
return self._grid_helper
def grid(self, b=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
"""
# there is some discrepancy between the behavior of grid in
# axes_grid and the original mpl's grid, because axes_grid
# explicitly sets the visibility of the gridlines.
super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
if not self._axisline_on:
return
if b is None:
if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
b=True
else:
b=False
self.gridlines.set_which(which)
self.gridlines.set_axis(axis)
self.gridlines.set_visible(b)
if len(kwargs):
martist.setp(self.gridlines, **kwargs)
def get_children(self):
if self._axisline_on:
children = list(self._axislines.values()) + [self.gridlines]
else:
children = []
children.extend(super(Axes, self).get_children())
return children
def invalidate_grid_helper(self):
self._grid_helper.invalidate()
def new_fixed_axis(self, loc, offset=None):
gh = self.get_grid_helper()
axis = gh.new_fixed_axis(loc,
nth_coord=None,
axis_direction=None,
offset=offset,
axes=self,
)
return axis
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
):
gh = self.get_grid_helper()
axis = gh.new_floating_axis(nth_coord, value,
axis_direction=axis_direction,
axes=self)
return axis
def draw(self, renderer, inframe=False):
if not self._axisline_on:
super(Axes, self).draw(renderer, inframe)
return
orig_artists = self.artists
self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
super(Axes, self).draw(renderer, inframe)
self.artists = orig_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
if not self._axisline_on:
return bb0
bb = [bb0]
for axisline in self._axislines.values():
if not axisline.get_visible():
continue
bb.append(axisline.get_tightbbox(renderer))
# if axisline.label.get_visible():
# bb.append(axisline.label.get_window_extent(renderer))
# if axisline.major_ticklabels.get_visible():
# bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
# if axisline.minor_ticklabels.get_visible():
# bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
# if axisline.major_ticklabels.get_visible() or \
# axisline.minor_ticklabels.get_visible():
# bb.append(axisline.offsetText.get_window_extent(renderer))
#bb.extend([c.get_window_extent(renderer) for c in artists \
# if c.get_visible()])
_bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
def __init__(self, *kl, **kw):
super(AxesZero, self).__init__(*kl, **kw)
def _init_axis_artists(self):
super(AxesZero, self)._init_axis_artists()
new_floating_axis = self._grid_helper.new_floating_axis
xaxis_zero = new_floating_axis(nth_coord=0,
value=0.,
axis_direction="bottom",
axes=self)
xaxis_zero.line.set_clip_path(self.patch)
xaxis_zero.set_visible(False)
self._axislines["xzero"] = xaxis_zero
yaxis_zero = new_floating_axis(nth_coord=1,
value=0.,
axis_direction="left",
axes=self)
yaxis_zero.line.set_clip_path(self.patch)
yaxis_zero.set_visible(False)
self._axislines["yzero"] = yaxis_zero
SubplotZero = maxes.subplot_class_factory(AxesZero)
if 0:
#if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = SubplotZero(fig, 1, 1, 1)
fig.add_subplot(ax)
ax.axis["xzero"].set_visible(True)
ax.axis["xzero"].label.set_text("Axis Zero")
for n in ["top", "right"]:
ax.axis[n].set_visible(False)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
plt.draw()
plt.show()
if __name__ == "__main__":
#if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = Subplot(fig, 1, 1, 1)
fig.add_subplot(ax)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")
#ax.axis["bottom"].set_tick_direction("in")
ax.axis["bottom"].set_label("Tk0")
plt.draw()
plt.show()
|
gpl-3.0
|
PanDAWMS/panda-server
|
pandaserver/jobdispatcher/Protocol.py
|
1
|
16027
|
import re
import json
import base64
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from pandaserver.taskbuffer import EventServiceUtils
from pandaserver.dataservice import DataServiceUtils
# constants
TimeOutToken = "TimeOut"
NoJobsToken = "NoJobs"
########### status codes
# succeeded
SC_Success = 0
# timeout
SC_TimeOut = 10
# no available jobs
SC_NoJobs = 20
# failed
SC_Failed = 30
# non-secure connection
SC_NonSecure = 40
# invalid token
SC_Invalid = 50
# invalid role
SC_Role = 60
# permission denied
SC_Perms = 70
# key missing
SC_MissKey = 80
# failure of proxy retrieval
SC_ProxyError = 90
# response
class Response:
# constructor
def __init__(self,statusCode,errorDialog=None):
# create data object
self.data = {'StatusCode':statusCode}
if errorDialog is not None:
self.data['errorDialog'] = errorDialog
# URL encode
def encode(self,acceptJson=False):
if not acceptJson:
return urlencode(self.data)
else:
return {'type':'json','content':json.dumps(self.data)}
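# Illustrative sketch (not part of the original module): encode() picks the
# wire format based on what the pilot accepts, e.g.
#
#     Response(SC_Success).encode(acceptJson=True)
#     # -> {'type': 'json', 'content': '{"StatusCode": 0}'}
#     Response(SC_TimeOut).encode()
#     # -> 'StatusCode=10'   (URL-encoded key=value pairs)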
# append Node
def appendNode(self,name,value):
self.data[name]=value
# append job
def appendJob(self, job, siteMapperCache=None):
# event service merge
if EventServiceUtils.isEventServiceMerge(job):
isEventServiceMerge = True
else:
isEventServiceMerge = False
# PandaID
self.data['PandaID'] = job.PandaID
# prodSourceLabel
self.data['prodSourceLabel'] = job.prodSourceLabel
# swRelease
self.data['swRelease'] = job.AtlasRelease
# homepackage
self.data['homepackage'] = job.homepackage
# transformation
self.data['transformation'] = job.transformation
# job name
self.data['jobName'] = job.jobName
# job definition ID
self.data['jobDefinitionID'] = job.jobDefinitionID
# cloud
self.data['cloud'] = job.cloud
# files
strIFiles = ''
strOFiles = ''
strDispatch = ''
strDisToken = ''
strDisTokenForOutput = ''
strDestination = ''
strRealDataset = ''
strRealDatasetIn = ''
strProdDBlock = ''
strDestToken = ''
strProdToken = ''
strProdTokenForOutput = ''
strGUID = ''
strFSize = ''
strCheckSum = ''
strFileDestinationSE = ''
strScopeIn = ''
strScopeOut = ''
strScopeLog = ''
logFile = ''
logGUID = ''
ddmEndPointIn = []
ddmEndPointOut = []
noOutput = []
siteSpec = None
inDsLfnMap = {}
inLFNset = set()
if siteMapperCache is not None:
siteMapper = siteMapperCache.getObj()
siteSpec = siteMapper.getSite(job.computingSite)
# resolve destSE
try:
job.destinationSE = siteMapper.resolveNucleus(job.destinationSE)
for tmpFile in job.Files:
tmpFile.destinationSE = siteMapper.resolveNucleus(tmpFile.destinationSE)
except Exception:
pass
siteMapperCache.releaseObj()
for file in job.Files:
if file.type == 'input':
if EventServiceUtils.isJumboJob(job) and file.lfn in inLFNset:
pass
else:
inLFNset.add(file.lfn)
if strIFiles != '':
strIFiles += ','
strIFiles += file.lfn
if strDispatch != '':
strDispatch += ','
strDispatch += file.dispatchDBlock
if strDisToken != '':
strDisToken += ','
strDisToken += file.dispatchDBlockToken
strProdDBlock += '%s,' % file.prodDBlock
if not isEventServiceMerge:
strProdToken += '%s,' % file.prodDBlockToken
else:
strProdToken += '%s,' % job.metadata[1][file.lfn]
if strGUID != '':
strGUID += ','
strGUID += file.GUID
strRealDatasetIn += '%s,' % file.dataset
strFSize += '%s,' % file.fsize
if file.checksum not in ['','NULL',None]:
strCheckSum += '%s,' % file.checksum
else:
strCheckSum += '%s,' % file.md5sum
strScopeIn += '%s,' % file.scope
ddmEndPointIn.append(self.getDdmEndpoint(siteSpec, file.dispatchDBlockToken, 'input',
job.prodSourceLabel, job.job_label))
if file.dataset not in inDsLfnMap:
inDsLfnMap[file.dataset] = []
inDsLfnMap[file.dataset].append(file.lfn)
if file.type == 'output' or file.type == 'log':
if strOFiles != '':
strOFiles += ','
strOFiles += file.lfn
if strDestination != '':
strDestination += ','
strDestination += file.destinationDBlock
if strRealDataset != '':
strRealDataset += ','
strRealDataset += file.dataset
strFileDestinationSE += '%s,' % file.destinationSE
if file.type == 'log':
logFile = file.lfn
logGUID = file.GUID
strScopeLog = file.scope
else:
strScopeOut += '%s,' % file.scope
if strDestToken != '':
strDestToken += ','
strDestToken += re.sub('^ddd:','dst:',file.destinationDBlockToken.split(',')[0])
strDisTokenForOutput += '%s,' % file.dispatchDBlockToken
strProdTokenForOutput += '%s,' % file.prodDBlockToken
ddmEndPointOut.append(self.getDdmEndpoint(siteSpec, file.destinationDBlockToken.split(',')[0], 'output',
job.prodSourceLabel, job.job_label))
if file.isAllowedNoOutput():
noOutput.append(file.lfn)
# inFiles
self.data['inFiles'] = strIFiles
# dispatch DBlock
self.data['dispatchDblock'] = strDispatch
# dispatch DBlock space token
self.data['dispatchDBlockToken'] = strDisToken
# dispatch DBlock space token for output
self.data['dispatchDBlockTokenForOut'] = strDisTokenForOutput[:-1]
# outFiles
self.data['outFiles'] = strOFiles
# destination DBlock
self.data['destinationDblock'] = strDestination
# destination DBlock space token
self.data['destinationDBlockToken'] = strDestToken
# prod DBlocks
self.data['prodDBlocks'] = strProdDBlock[:-1]
# prod DBlock space token
self.data['prodDBlockToken'] = strProdToken[:-1]
# real output datasets
self.data['realDatasets'] = strRealDataset
# real output datasets
self.data['realDatasetsIn'] = strRealDatasetIn[:-1]
# file's destinationSE
self.data['fileDestinationSE'] = strFileDestinationSE[:-1]
# log filename
self.data['logFile'] = logFile
# log GUID
self.data['logGUID'] = logGUID
# jobPars
self.data['jobPars'], ppSteps = job.extractMultiStepExec()
if ppSteps is not None:
self.data.update(ppSteps)
if job.to_encode_job_params():
self.data['jobPars'] = base64.b64encode(self.data['jobPars'].encode()).decode()
# attempt number
self.data['attemptNr'] = job.attemptNr
# GUIDs
self.data['GUID'] = strGUID
# checksum
self.data['checksum'] = strCheckSum[:-1]
# fsize
self.data['fsize'] = strFSize[:-1]
# scope
self.data['scopeIn'] = strScopeIn[:-1]
self.data['scopeOut'] = strScopeOut[:-1]
self.data['scopeLog'] = strScopeLog
# DDM endpoints
try:
self.data['ddmEndPointIn'] = ','.join(ddmEndPointIn)
except TypeError:
self.data['ddmEndPointIn'] = ''
try:
self.data['ddmEndPointOut'] = ','.join(ddmEndPointOut)
except TypeError:
self.data['ddmEndPointOut'] = ''
# destinationSE
self.data['destinationSE'] = job.destinationSE
# user ID
self.data['prodUserID'] = job.prodUserID
# CPU count
self.data['maxCpuCount'] = job.maxCpuCount
# RAM count
self.data['minRamCount'] = job.minRamCount
# disk count
self.data['maxDiskCount'] = job.maxDiskCount
# cmtconfig
if ppSteps is None:
self.data['cmtConfig'] = job.cmtConfig
else:
self.data['cmtConfig'] = ''
# processingType
self.data['processingType'] = job.processingType
# transferType
self.data['transferType'] = job.transferType
# sourceSite
self.data['sourceSite'] = job.sourceSite
# current priority
self.data['currentPriority'] = job.currentPriority
# taskID
if job.lockedby == 'jedi':
self.data['taskID'] = job.jediTaskID
else:
self.data['taskID'] = job.taskID
# core count
if job.coreCount in ['NULL', None]:
self.data['coreCount'] = 1
else:
self.data['coreCount'] = job.coreCount
# jobsetID
self.data['jobsetID'] = job.jobsetID
# nucleus
self.data['nucleus'] = job.nucleus
# walltime
self.data['maxWalltime'] = job.maxWalltime
# looping check
if job.is_no_looping_check():
self.data['loopingCheck'] = False
# debug mode
if job.specialHandling is not None and 'debug' in job.specialHandling:
self.data['debug'] = 'True'
# event service or job cloning
if EventServiceUtils.isJobCloningJob(job):
self.data['cloneJob'] = EventServiceUtils.getJobCloningType(job)
elif EventServiceUtils.isEventServiceJob(job) or EventServiceUtils.isJumboJob(job):
self.data['eventService'] = 'True'
# prod DBlock space token for pre-merging output
self.data['prodDBlockTokenForOutput'] = strProdTokenForOutput[:-1]
# event service merge
if isEventServiceMerge:
self.data['eventServiceMerge'] = 'True'
# write to file for ES merge
writeToFileStr = ''
try:
for outputName in job.metadata[0]:
inputList = job.metadata[0][outputName]
writeToFileStr += 'inputFor_{0}:'.format(outputName)
for tmpInput in inputList:
writeToFileStr += '{0},'.format(tmpInput)
writeToFileStr = writeToFileStr[:-1]
writeToFileStr += '^'
writeToFileStr = writeToFileStr[:-1]
except Exception:
pass
self.data['writeToFile'] = writeToFileStr
elif job.writeInputToFile():
try:
# write input to file
writeToFileStr = ''
for inDS in inDsLfnMap:
inputList = inDsLfnMap[inDS]
inDS = re.sub('/$','',inDS)
inDS = inDS.split(':')[-1]
writeToFileStr += 'tmpin_{0}:'.format(inDS)
writeToFileStr += ','.join(inputList)
writeToFileStr += '^'
writeToFileStr = writeToFileStr[:-1]
self.data['writeToFile'] = writeToFileStr
except Exception:
pass
# replace placeholder
if EventServiceUtils.isJumboJob(job) or EventServiceUtils.isCoJumboJob(job):
try:
for inDS in inDsLfnMap:
inputList = inDsLfnMap[inDS]
inDS = re.sub('/$','',inDS)
inDS = inDS.split(':')[-1]
srcStr = 'tmpin__cnt_{0}'.format(inDS)
dstStr = ','.join(inputList)
self.data['jobPars'] = self.data['jobPars'].replace(srcStr, dstStr)
except Exception:
pass
# no output
if noOutput != []:
self.data['allowNoOutput'] = ','.join(noOutput)
# alternative stage-out
if job.getAltStgOut() is not None:
self.data['altStageOut'] = job.getAltStgOut()
# log to OS
if job.putLogToOS():
self.data['putLogToOS'] = 'True'
# suppress execute string conversion
if job.noExecStrCnv():
self.data['noExecStrCnv'] = 'True'
# in-file positional event number
if job.inFilePosEvtNum():
self.data['inFilePosEvtNum'] = 'True'
# use prefetcher
if job.usePrefetcher():
self.data['usePrefetcher'] = 'True'
# image name
if job.container_name not in ['NULL', None]:
self.data['container_name'] = job.container_name
# IO
self.data['ioIntensity'] = job.get_task_attribute('ioIntensity')
self.data['ioIntensityUnit'] = job.get_task_attribute('ioIntensityUnit')
# HPO
if job.is_hpo_workflow():
self.data['isHPO'] = 'True'
# VP
if siteSpec is not None:
scope_input, scope_output = DataServiceUtils.select_scope(siteSpec, job.prodSourceLabel, job.job_label)
if siteSpec.use_vp(scope_input):
self.data['useVP'] = 'True'
# set proxy key
def setProxyKey(self,proxyKey):
names = ['credname','myproxy']
for name in names:
if name in proxyKey:
self.data[name] = proxyKey[name]
else:
self.data[name] = ''
# set secret key for panda proxy
def setPandaProxySecretKey(self,secretKey):
self.data['pandaProxySecretKey'] = secretKey
# get ddm endpoint
def getDdmEndpoint(self, siteSpec, spaceToken, mode, prodSourceLabel, job_label):
scope_input, scope_output = DataServiceUtils.select_scope(siteSpec, prodSourceLabel, job_label)
if siteSpec is None or mode not in ['input', 'output']:
return ''
if mode == 'input':
connected_endpoints = siteSpec.ddm_endpoints_input.get(scope_input)
elif mode == 'output':
connected_endpoints = siteSpec.ddm_endpoints_output.get(scope_output)
endPoint = DataServiceUtils.getDestinationSE(spaceToken)
if endPoint and connected_endpoints and connected_endpoints.isAssociated(endPoint):
return endPoint
endPoint = DataServiceUtils.getDistributedDestination(spaceToken)
if endPoint and connected_endpoints and connected_endpoints.isAssociated(endPoint):
return endPoint
if mode == 'input':
setokens = siteSpec.setokens_input.get(scope_input, [])
ddm = siteSpec.ddm_input.get(scope_input)
elif mode == 'output':
setokens = siteSpec.setokens_output.get(scope_output, [])
ddm = siteSpec.ddm_output.get(scope_output)
if spaceToken in setokens:
return setokens[spaceToken]
# Protection against misconfigured sites
if not ddm:
ddm = ''
return ddm
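# Summary of the resolution order above (clarifying comment, not part of the
# original module):
#   1. explicit destination encoded in the space token (getDestinationSE)
#   2. distributed destination encoded in the token (getDistributedDestination)
#   3. the site's setokens_{input,output} mapping for the token
#   4. the site's default ddm_{input,output} endpoint, or '' if unset
# Endpoints from steps 1 and 2 are used only if they are associated with the
# site for the relevant scope.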
# check if secure connection
def isSecure(req):
if 'SSL_CLIENT_S_DN' not in req.subprocess_env:
return False
return True
# get user DN
def getUserDN(req):
try:
return req.subprocess_env['SSL_CLIENT_S_DN']
except Exception:
return 'None'
|
apache-2.0
|
giorgiop/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
bsipocz/bokeh
|
bokeh/charts/builder/histogram_builder.py
|
43
|
9142
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Histogram class which lets you build your histograms just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
# list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
# build the histogram using the configured number of bins
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
ColumnDataSurce.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, hist, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, hist, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
|
bsd-3-clause
|
olologin/scikit-learn
|
sklearn/gaussian_process/gpc.py
|
42
|
31571
|
"""Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
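# Note (clarifying comment, not part of the original module): with the
# Gaussian integral of the error function,
#   int erf(lam * z) N(z | f, s2) dz = erf(lam * f / sqrt(1 + 2 * lam**2 * s2)),
# each entry of `integrals` reduces to erf(.) / 2, so pi_star equals
# sum(COEFS * (erf(.) + 1) / 2), i.e. the least-squares combination of error
# functions that approximates the logistic sigmoid (see LAMBDAS/COEFS above).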
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
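# Summary (clarifying comment, not part of the original module): each Newton
# step above implements Algorithm 3.1 of GPML with W = pi * (1 - pi):
#   B = I + W^(1/2) K W^(1/2),  L = cholesky(B)
#   b = W f + (y - pi)
#   a = b - W^(1/2) L^-T (L^-1 (W^(1/2) K b))
#   f_new = K a
# and the log marginal likelihood used as the convergence check is
#   -a^T f / 2 - sum(log(1 + exp(-(2 y - 1) f))) - sum(log(diag(L))).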
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
    random_state : integer or numpy.RandomState, optional
        The generator used to draw the random initial values of theta when
        ``n_restarts_optimizer > 0``. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
            Target values (class labels); multi-class problems are supported
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
        In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. In the case of multi-class classification, theta may
            be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels are assigned
            the same theta values. If None, the precomputed
            log_marginal_likelihood of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
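            # Example: with 3 classes and a kernel with 2 hyperparameters,
            # theta may have shape (2,) (shared by all sub-kernels) or shape
            # (6,), split into the per-class blocks [0:2], [2:4] and [4:6].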
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
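

# Minimal usage sketch (illustrative only, not part of the original module):
# fit the classifier defined above on a small synthetic three-class problem.
# The data, kernel and settings below are arbitrary choices for demonstration.
if __name__ == "__main__":
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF

    rng = np.random.RandomState(0)
    # Three well-separated Gaussian blobs in 2D, 20 samples per class.
    X_demo = np.concatenate([rng.normal(loc=c, scale=0.5, size=(20, 2))
                             for c in (-2.0, 0.0, 2.0)])
    y_demo = np.repeat([0, 1, 2], 20)

    clf = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
                                    multi_class="one_vs_rest")
    clf.fit(X_demo, y_demo)
    print(clf.predict(X_demo[:5]))
    print(clf.predict_proba(X_demo[:5]))  # columns follow clf.classes_
    print(clf.log_marginal_likelihood_value_)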
|
bsd-3-clause
|
apaleyes/mxnet
|
example/rcnn/rcnn/pycocotools/coco.py
|
17
|
18296
|
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
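                # COCO boxes are [x, y, width, height]; derive the corner
                # coordinates and use the axis-aligned rectangle as a coarse
                # polygon segmentation when none is provided.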
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: RLE (run-length encoded segmentation)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
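

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the annotation file passed on
    # the command line must be a COCO-format JSON file, and the category name
    # 'person' is just an example.
    if len(sys.argv) > 1 and os.path.exists(sys.argv[1]):
        coco_demo = COCO(sys.argv[1])
        cat_ids = coco_demo.getCatIds(catNms=['person'])
        img_ids = coco_demo.getImgIds(catIds=cat_ids)
        ann_ids = coco_demo.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
        anns = coco_demo.loadAnns(ann_ids)
        print('loaded {} annotations for image {}'.format(len(anns), img_ids[:1]))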
|
apache-2.0
|
MadsJensen/RP_scripts
|
skmodels_results.py
|
1
|
3022
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 14:50:08 2016
@author: mje
"""
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import mne
import glob
from my_settings import *
labels = mne.read_labels_from_annot(subject="0008",
parc="PALS_B12_Brodmann",
regexp="Brodmann",
subjects_dir=subjects_dir)
conditions = ["pln", "pre", "post"]
# measures = ["eigen", "pagerank", "path-strength"]
measures = ["pagerank"]
bands = ["alpha", "beta", "gamma_low", "gamma_high"]
column_keys = ["condition", "measure", "band", "scores",
"mean_score", "std", "feature_importance"]
results = pd.DataFrame(columns=column_keys)
for cond in conditions:
for measure in measures:
scores = np.load(source_folder + "graph_data/%s_scores_all_ada_%s.npy" % (measure, cond))
for j, band in enumerate(bands):
model = joblib.load(source_folder +
"graph_data/sk_models/%s_ada_%s_%s.plk" % (measure, condition, band))
row = pd.DataFrame([{"condition": cond,
"measure": measure,
"band": band,
"scores": scores[j],
"mean_score": scores[j].mean(),
"std": scores[j].std(),
"feature_importance": model.feature_importances_}])
results = results.append(row, ignore_index=True)
results[["condition", "measure", "band", "mean_score", "std"]].sort("mean_score")
for j in range(len(results.feature_importance)):
print("\nmeasure: %s, band: %s" % (results.ix[j].measure.swapcase(),
results.ix[j].band.swapcase()))
for i in range(82):
if results.feature_importance[j][i] > 0:
print(labels[i].name + " score: %s" %
np.round(results.feature_importance[j][i], 4))
# The following loop requires a feature-importance vector ``f3`` that is not
# defined in this script:
# for i in range(82):
#     if f3[i] > 0:
#         print(labels[i].name + " score: %s" % f3[i])
#### PERM TESTS ####
perm_tests = glob.glob(source_folder + "graph_data/perm_test*")
column_keys = ["name", "pval", "score", "mean_perm", "std_perm", "band"]
perm_results = pd.DataFrame(columns=column_keys)
for test in perm_tests:
tmp = np.load(test).item()
for band in bands:
name = test.split("_")[-2:]
name = "_".join(name)[:-4]
row = pd.DataFrame([{"name": name,
"pval": tmp[band]["pval"],
"score": tmp[band]["score"],
"mean_perm": tmp[band]["perm_scores"].mean(),
"std_perm":tmp[band]["perm_scores"].std(),
"band": band}])
perm_results = perm_results.append(row, ignore_index=True)
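# Illustrative follow-up (threshold chosen arbitrarily): list the permutation
# tests that reach significance at alpha = 0.05.
print(perm_results[perm_results["pval"] < 0.05][["name", "band", "score", "pval"]])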
|
bsd-3-clause
|
COSMOGRAIL/COSMOULINE
|
pipe/extrascripts/read_stat_rdbfiles.py
|
1
|
3050
|
import numpy as np
import pickle as pkl
import pycs, os
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
rdb_list = [
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/HE0047_WFI/HE0047_WFI.rdb",
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/WG0214_WFI/WG0214_WFI.rdb",
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/DES0407_WFI/DES0407_WFI.rdb",
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/J0832_WFI/J0832_WFI.rdb",
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/2M1134_WFI/2M1134_WFI.rdb",
"/Users/martin/Desktop/cosmograil-dr1/WFI_lenses/PS1606_WFI/PS1606_WFI.rdb",
]
object_list = [os.path.basename(r).split("_")[0] for r in rdb_list]
print object_list
nicename = [
' HE 0047-1756',
' WG 0214-2105',
' DES 0407-5006',
' SDSS J0832+0404',
' 2M 1134-2103',
' PSJ 1606-2333',
]
mycolours = ["darkorange", "royalblue", "seagreen", "purple", "brown", "magenta", "orange"]
fig1 = plt.figure(figsize=(12,8))
ax1 = fig1.add_subplot(1,2,1)
ax2 = fig1.add_subplot(1,2,2)
plt.subplots_adjust(left = 0.1, right=0.98, top=0.96, bottom=0.1, wspace=0.08, hspace=0.0)
handle_list = []
for i,rdb in enumerate(rdb_list):
lc = pycs.gen.lc.rdbimport(rdb_list[i], object='A', magcolname="mag_A", magerrcolname="magerr_A_5", mhjdcolname="mhjd", flagcolname = None, propertycolnames = "lcmanip", verbose = False)
seeings = [float(prop['fwhm']) for prop in lc.properties]
airmass = [float(prop['airmass']) for prop in lc.properties]
skylevel = [float(prop['relskylevel']) for prop in lc.properties]
nbimage = [float(prop['nbimg']) for prop in lc.properties]
ellipticity = [float(prop['ellipticity']) for prop in lc.properties]
print "### %s ### "% object_list[i]
print "Epochs : ", len(lc.jds)
print "Median seeing : ", np.median(seeings)
print "Median airmass : ", np.median(airmass)
print "Median skylevel : ", np.median(skylevel)
print "Median nbimage : ", np.median(nbimage)
print "Median ellipticity : ", np.median(ellipticity)
ax1.hist(seeings, histtype= 'step', color= mycolours[i], label=nicename[i], density= True, linewidth = 2)
ax2.hist(airmass, histtype= 'step', color= mycolours[i], label=nicename[i], density= True, linewidth = 2)
# Create new legend handles but use the colors from the existing ones
handles, labels = ax1.get_legend_handles_labels()
new_handles = [mpl.lines.Line2D([], [], c=h.get_edgecolor()) for h in handles]
ax1.set_xlabel('Seeing (per epoch) ["]', fontdict={"fontsize": 20})
ax2.set_xlabel('Airmass (per epoch)', fontdict={"fontsize": 20})
ax1.set_ylabel('Normalised distribution', fontdict={"fontsize": 20})
ax1.legend(handles=new_handles, labels = labels, fontsize = 16)
name = "/Users/martin/Desktop/DR2/paperplot/seeing.pdf"
fig1.savefig("/Users/martin/Desktop/DR2/paperplot/seeing.pdf")
os.system("pdfcrop %s %s"%(name,name))
plt.show()
|
gpl-3.0
|
uetke/experimentor
|
experimentor/drivers/hamamatsu/hamamatsu_camera.py
|
1
|
34446
|
# -*- coding: utf-8 -*-
"""
UUTrack.Controller.devices.hamamatsu.hamamatsu_camera.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
File taken from `ZhuangLab <https://github.com/ZhuangLab/storm-control>`_
A ctypes based interface to Hamamatsu cameras.
(tested on a sCMOS Flash 4.0).
The documentation is a little confusing to me on this subject..
I used c_int32 when this is explicitly specified, otherwise I use c_int.
.. todo:: I'm using the "old" functions because these are documented. Switch to the "new" functions at some point.
.. todo:: How to stream 2048 x 2048 at max frame rate to the flash disk? The Hamamatsu software can do this.
.. sectionauthor:: Hazen Babcock 10/13
"""
import ctypes
import ctypes.util
import numpy
# Hamamatsu constants.
DCAMCAP_EVENT_FRAMEREADY = int("0x0002", 0)
# DCAM3 API.
DCAMERR_ERROR = 0
DCAMERR_NOERROR = 1
DCAMPROP_ATTR_HASVALUETEXT = int("0x10000000", 0)
DCAMPROP_ATTR_READABLE = int("0x00010000", 0)
DCAMPROP_ATTR_WRITABLE = int("0x00020000", 0)
DCAMPROP_OPTION_NEAREST = int("0x80000000", 0)
DCAMPROP_OPTION_NEXT = int("0x01000000", 0)
DCAMPROP_OPTION_SUPPORT = int("0x00000000", 0)
DCAMPROP_TYPE_MODE = int("0x00000001", 0)
DCAMPROP_TYPE_LONG = int("0x00000002", 0)
DCAMPROP_TYPE_REAL = int("0x00000003", 0)
DCAMPROP_TYPE_MASK = int("0x0000000F", 0)
DCAMWAIT_TIMEOUT_INFINITE = int("0x80000000", 0)
DCAM_CAPTUREMODE_SNAP = 0
DCAM_CAPTUREMODE_SEQUENCE = 1
DCAM_DEFAULT_ARG = 0
DCAM_IDPROP_EXPOSURETIME = int("0x001F0110", 0)
DCAM_IDSTR_MODEL = int("0x04000104", 0)
class DCAM_PARAM_PROPERTYATTR(ctypes.Structure):
"""The dcam property attribute structure."""
_fields_ = [("cbSize", ctypes.c_int32),
("iProp", ctypes.c_int32),
("option", ctypes.c_int32),
("iReserved1", ctypes.c_int32),
("attribute", ctypes.c_int32),
("iGroup", ctypes.c_int32),
("iUnit", ctypes.c_int32),
("attribute2", ctypes.c_int32),
("valuemin", ctypes.c_double),
("valuemax", ctypes.c_double),
("valuestep", ctypes.c_double),
("valuedefault", ctypes.c_double),
("nMaxChannel", ctypes.c_int32),
("iReserved3", ctypes.c_int32),
("nMaxView", ctypes.c_int32),
("iProp_NumberOfElement", ctypes.c_int32),
("iProp_ArrayBase", ctypes.c_int32),
("iPropStep_Element", ctypes.c_int32)]
class DCAM_PARAM_PROPERTYVALUETEXT(ctypes.Structure):
"""The dcam text property structure."""
_fields_ = [("cbSize", ctypes.c_int32),
("iProp", ctypes.c_int32),
("value", ctypes.c_double),
("text", ctypes.c_char_p),
("textbytes", ctypes.c_int32)]
def convertPropertyName(p_name):
""""Regularizes" a property name. We are using all lowercase names with
the spaces replaced by underscores.
@param p_name The property name string to regularize.
@return The regularized property name."""
a = p_name.decode('ascii')
b = a.lower()
c = b.replace(" ","_")
return c
class DCAMException(Exception):
"""Camera exceptions."""
def __init__(self, message):
Exception.__init__(self, message)
# dcam = ctypes.windll.dcamapi
#
# temp = ctypes.c_int32(0)
# if (dcam.dcam_init(None, ctypes.byref(temp), None) != DCAMERR_NOERROR):
# raise DCAMException("DCAM initialization failed.")
# n_cameras = temp.value
class HCamData():
"""Hamamatsu camera data object.
Initially I tried to use create_string_buffer() to allocate storage for the
data from the camera but this turned out to be too slow. The software
kept falling behind the camera and create_string_buffer() seemed to be the
bottleneck."""
def __init__(self, size):
"""Create a data object of the appropriate size.
@param size The size of the data object in bytes."""
self.np_array = numpy.empty((int(size/2), 1), dtype=numpy.uint16)
self.size = size
## __getitem__
#
# @param slice The slice of the item to get.
#
def __getitem__(self, slice):
return self.np_array[slice]
## copyData
#
# Uses the C memmove function to copy data from an address in memory
# into memory allocated for the numpy array of this object.
#
# @param address The memory address of the data to copy.
#
def copyData(self, address):
ctypes.memmove(self.np_array.ctypes.data, address, self.size)
## getData
#
# @return A numpy array that contains the camera data.
#
def getData(self):
return self.np_array
## getDataPtr
#
# @return The physical address in memory of the data.
#
def getDataPtr(self):
return self.np_array.ctypes.data
class HamamatsuCamera():
CAPTUREMODE_SNAP = 0
CAPTUREMODE_SEQUENCE = 1
"""Basic camera interface class.
This version uses the Hamamatsu library to allocate camera buffers.
Storage for the data from the camera is allocated dynamically and
copied out of the camera buffers."""
def __init__(self, camera_id):
"""Open the connection to the camera specified by camera_id.
@param camera_id The id of the camera (an integer)."""
self.buffer_index = 0
self.camera_id = camera_id
self.dcam = ctypes.windll.dcamapi
self.debug = False
self.frame_bytes = 0
self.frame_x = 0
self.frame_y = 0
self.last_frame_number = 0
self.properties = {}
self.max_backlog = 0
self.number_image_buffers = 0
# Open the camera.
self.camera_handle = ctypes.c_void_p(0)
self.temp = ctypes.c_int32(0)
if (self.dcam.dcam_init(None, ctypes.byref(self.temp), None) != DCAMERR_NOERROR):
raise DCAMException("DCAM initialization failed.")
self.n_cameras = self.temp.value
self.checkStatus(self.dcam.dcam_open(ctypes.byref(self.camera_handle),
ctypes.c_int32(self.camera_id),
None),
"dcam_open")
self.camera_model = self.getModelInfo(camera_id)
# Get camera properties.
self.properties = self.getCameraProperties()
# Get camera max width, height.
self.max_width = self.getPropertyValue("image_width")[0]
self.max_height = self.getPropertyValue("image_height")[0]
self.setmode(self.CAPTUREMODE_SEQUENCE) # By default is a sequence
def settrigger(self,mode):
TRIGMODE = ctypes.c_int32(mode)
self.checkStatus(self.dcam.dcam_settriggermode(self.camera_handle,TRIGMODE),'settriggermode')
DCAM_TRIGGERMODE = ctypes.c_int32(0)
self.checkStatus(self.dcam.dcam_gettriggermode(self.camera_handle,ctypes.byref(DCAM_TRIGGERMODE)),'gettrigermode')
return DCAM_TRIGGERMODE.value
def setmode(self, mode):
"""Sets the acquisition mode of the camera."""
self.mode = mode
def initCamera(self):
#
# Initialization
#
#self.dcam = ctypes.windll.dcamapi
self.captureSetup()
def captureSetup(self):
"""Capture setup (internal use only). This is called at the start
of new acquisition sequence to determine the current ROI and
get the camera configured properly."""
self.buffer_index = -1
self.last_frame_number = 0
# Set sub array mode.
self.setSubArrayMode()
# Get frame properties.
self.frame_x = self.getPropertyValue("image_width")[0]
self.frame_y = self.getPropertyValue("image_height")[0]
self.frame_bytes = self.getPropertyValue("image_framebytes")[0]
# Set capture mode.
self.checkStatus(self.dcam.dcam_precapture(self.camera_handle,
ctypes.c_int(self.mode)),
"dcam_precapture")
def checkStatus(self, fn_return, fn_name= "unknown"):
"""Check return value of the dcam function call.
Throw an error if not as expected?
@return The return value of the function."""
#if (fn_return != DCAMERR_NOERROR) and (fn_return != DCAMERR_ERROR):
# raise DCAMException("dcam error: " + fn_name + " returned " + str(fn_return))
if (fn_return == DCAMERR_ERROR):
c_buf_len = 80
c_buf = ctypes.create_string_buffer(c_buf_len)
c_error = self.dcam.dcam_getlasterror(self.camera_handle,
c_buf,
ctypes.c_int32(c_buf_len))
raise DCAMException("dcam error " + str(fn_name) + " " + str(c_buf.value))
#print "dcam error", fn_name, c_buf.value
return fn_return
def getCameraProperties(self):
"""Return the ids & names of all the properties that the camera supports. This
is used at initialization to populate the self.properties attribute.
@return A python dictionary of camera properties."""
c_buf_len = 64
c_buf = ctypes.create_string_buffer(c_buf_len)
properties = {}
prop_id = ctypes.c_int32(0)
# Reset to the start.
ret = self.dcam.dcam_getnextpropertyid(self.camera_handle,
ctypes.byref(prop_id),
ctypes.c_int32(DCAMPROP_OPTION_NEAREST))
if (ret != 0) and (ret != DCAMERR_NOERROR):
self.checkStatus(ret, "dcam_getnextpropertyid")
# Get the first property.
ret = self.dcam.dcam_getnextpropertyid(self.camera_handle,
ctypes.byref(prop_id),
ctypes.c_int32(DCAMPROP_OPTION_NEXT))
if (ret != 0) and (ret != DCAMERR_NOERROR):
self.checkStatus(ret, "dcam_getnextpropertyid")
self.checkStatus(self.dcam.dcam_getpropertyname(self.camera_handle,
prop_id,
c_buf,
ctypes.c_int32(c_buf_len)),
"dcam_getpropertyname")
# Get the rest of the properties.
last = -1
while prop_id.value != last:
last = prop_id.value
properties[convertPropertyName(c_buf.value)] = prop_id.value
ret = self.dcam.dcam_getnextpropertyid(self.camera_handle,
ctypes.byref(prop_id),
ctypes.c_int32(DCAMPROP_OPTION_NEXT))
if (ret != 0) and (ret != DCAMERR_NOERROR):
self.checkStatus(ret, "dcam_getnextpropertyid")
self.checkStatus(self.dcam.dcam_getpropertyname(self.camera_handle,
prop_id,
c_buf,
ctypes.c_int32(c_buf_len)),
"dcam_getpropertyname")
return properties
def fireTrigger(self):
"""Triggers the camera when in software mode."""
self.checkStatus(self.dcam.dcam_firetrigger(self.camera_handle),"dcam_firetrigger")
print('TRIG')
def getFrames(self):
"""Gets all of the available frames.
        This will block waiting for new frames even if there are new frames
        available when it is called.
@return [frames, [frame x size, frame y size]]."""
frames = []
for n in self.newFrames():
# Lock the frame in the camera buffer & get address.
data_address = ctypes.c_void_p(0)
row_bytes = ctypes.c_int32(0)
self.checkStatus(self.dcam.dcam_lockdata(self.camera_handle,
ctypes.byref(data_address),
ctypes.byref(row_bytes),
ctypes.c_int32(n)),
"dcam_lockdata")
# Create storage for the frame & copy into this storage.
hc_data = HCamData(self.frame_bytes)
hc_data.copyData(data_address)
# Unlock the frame.
#
# According to the documentation, this would be done automatically
# on the next call to lockdata, but we do this anyway.
self.checkStatus(self.dcam.dcam_unlockdata(self.camera_handle),
"dcam_unlockdata")
frames.append(hc_data)
return [frames, [self.frame_x, self.frame_y]]
def getModelInfo(self, camera_id):
"""Returns the model of the camera
@param camera_id The (integer) camera id number.
@return A string containing the camera name."""
c_buf_len = 20
c_buf = ctypes.create_string_buffer(c_buf_len)
self.checkStatus(self.dcam.dcam_getmodelinfo(ctypes.c_int32(camera_id),
ctypes.c_int32(DCAM_IDSTR_MODEL),
c_buf,
ctypes.c_int(c_buf_len)),
"dcam_getmodelinfo")
return c_buf.value
def getProperties(self):
"""Return the list of camera properties. This is the one to call if you
want to know the camera properties.
@return A dictionary of camera properties."""
return self.properties
def getPropertyAttribute(self, property_name):
"""Return the attribute structure of a particular property.
FIXME (OPTIMIZATION): Keep track of known attributes?
@param property_name The name of the property to get the attributes of.
@return A DCAM_PARAM_PROPERTYATTR object."""
p_attr = DCAM_PARAM_PROPERTYATTR()
p_attr.cbSize = ctypes.sizeof(p_attr)
p_attr.iProp = self.properties[property_name]
ret = self.checkStatus(self.dcam.dcam_getpropertyattr(self.camera_handle,
ctypes.byref(p_attr)),
"dcam_getpropertyattr")
if (ret == 0):
print(" property %s is not supported" % property_name)
return False
else:
return p_attr
def getPropertyText(self, property_name):
"""Return the text options of a property (if any).
@param property_name The name of the property to get the text values of.
@return A dictionary of text properties (which may be empty)."""
prop_attr = self.getPropertyAttribute(property_name)
if not (prop_attr.attribute & DCAMPROP_ATTR_HASVALUETEXT):
return {}
else:
# Create property text structure.
prop_id = self.properties[property_name]
v = ctypes.c_double(prop_attr.valuemin)
prop_text = DCAM_PARAM_PROPERTYVALUETEXT()
c_buf_len = 64
c_buf = ctypes.create_string_buffer(c_buf_len)
#prop_text.text = ctypes.c_char_p(ctypes.addressof(c_buf))
prop_text.cbSize = ctypes.c_int32(ctypes.sizeof(prop_text))
prop_text.iProp = ctypes.c_int32(prop_id)
prop_text.value = v
prop_text.text = ctypes.addressof(c_buf)
prop_text.textbytes = c_buf_len
# Collect text options.
done = False
text_options = {}
while not done:
# Get text of current value.
self.checkStatus(self.dcam.dcam_getpropertyvaluetext(self.camera_handle,
ctypes.byref(prop_text)),
"dcam_getpropertyvaluetext")
text_options[prop_text.text] = int(v.value)
# Get next value.
ret = self.dcam.dcam_querypropertyvalue(self.camera_handle,
ctypes.c_int32(prop_id),
ctypes.byref(v),
ctypes.c_int32(DCAMPROP_OPTION_NEXT))
prop_text.value = v
if ret == 0:
done = True
return text_options
def getPropertyRange(self, property_name):
"""Return the range for an attribute.
@param property_name The name of the property (as a string).
@return [minimum value, maximum value]."""
prop_attr = self.getPropertyAttribute(property_name)
temp = prop_attr.attribute & DCAMPROP_TYPE_MASK
if (temp == DCAMPROP_TYPE_REAL):
return [float(prop_attr.valuemin), float(prop_attr.valuemax)]
else:
return [int(prop_attr.valuemin), int(prop_attr.valuemax)]
def getPropertyRW(self, property_name):
"""Return if a property is readable / writeable.
@return [True/False (readable), True/False (writeable)]."""
prop_attr = self.getPropertyAttribute(property_name)
rw = []
# Check if the property is readable.
if (prop_attr.attribute & DCAMPROP_ATTR_READABLE):
rw.append(True)
else:
rw.append(False)
# Check if the property is writeable.
if (prop_attr.attribute & DCAMPROP_ATTR_WRITABLE):
rw.append(True)
else:
rw.append(False)
return rw
def getPropertyValue(self, property_name):
"""Return the current setting of a particular property.
@param property_name The name of the property.
@return [the property value, the property type]."""
# Check if the property exists.
if not (property_name in self.properties):
print(" unknown property name: %s"%property_name)
return False
prop_id = self.properties[property_name]
# Get the property attributes.
prop_attr = self.getPropertyAttribute(property_name)
# Get the property value.
c_value = ctypes.c_double(0)
self.checkStatus(self.dcam.dcam_getpropertyvalue(self.camera_handle,
ctypes.c_int32(prop_id),
ctypes.byref(c_value)),
"dcam_getpropertyvalue")
# Convert type based on attribute type.
temp = prop_attr.attribute & DCAMPROP_TYPE_MASK
if (temp == DCAMPROP_TYPE_MODE):
prop_type = "MODE"
prop_value = int(c_value.value)
elif (temp == DCAMPROP_TYPE_LONG):
prop_type = "LONG"
prop_value = int(c_value.value)
elif (temp == DCAMPROP_TYPE_REAL):
prop_type = "REAL"
prop_value = c_value.value
else:
prop_type = "NONE"
prop_value = False
return [prop_value, prop_type]
def isCameraProperty(self, property_name):
"""Check if a property name is supported by the camera.
@param property_name The name of the property.
return True/False if property_name is a supported camera property.
"""
if (property_name in self.properties):
return True
else:
return False
def newFrames(self):
"""Return a list of the ids of all the new frames since the last check.
This will block waiting for at least one new frame.
@return [id of the first frame, .. , id of the last frame]
"""
# Wait for a new frame.
dwait = ctypes.c_int(DCAMCAP_EVENT_FRAMEREADY)
self.checkStatus(self.dcam.dcam_wait(self.camera_handle,
ctypes.byref(dwait),
ctypes.c_int(DCAMWAIT_TIMEOUT_INFINITE),
None),
"dcam_wait")
# Check how many new frames there are.
b_index = ctypes.c_int32(0) # Is pointer to receive the number of the frame in which the most recent data is stored.
f_count = ctypes.c_int32(0) # is pointer to receive the number of frames captured since the capture operation was begun. If no frames have been captured, a value of –1 is returned
self.checkStatus(self.dcam.dcam_gettransferinfo(self.camera_handle,
ctypes.byref(b_index),
ctypes.byref(f_count)),
"dcam_gettransferinfo")
# Check that we have not acquired more frames than we can store in our buffer.
# Keep track of the maximum backlog.
cur_frame_number = f_count.value
backlog = cur_frame_number - self.last_frame_number
if backlog > self.number_image_buffers:
print("warning: hamamatsu camera frame buffer overrun detected!")
if backlog > self.max_backlog:
self.max_backlog = backlog
self.last_frame_number = cur_frame_number
cur_buffer_index = b_index.value
# Create a list of the new frames.
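        # The DCAM frame buffer is circular: e.g. with 64 buffers, a previous
        # buffer_index of 62 and a cur_buffer_index of 1 yields the new frames
        # 63, 0 and 1, which is the wrap-around case handled first below.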
new_frames = []
if cur_buffer_index < self.buffer_index:
for i in range(self.buffer_index + 1, self.number_image_buffers):
new_frames.append(i)
for i in range(cur_buffer_index + 1):
new_frames.append(i)
else:
for i in range(self.buffer_index, cur_buffer_index):
new_frames.append(i+1)
self.buffer_index = cur_buffer_index
if self.debug:
print(new_frames)
return new_frames
def setPropertyValue(self, property_name, property_value):
"""Set the value of a property.
@param property_name The name of the property.
@param property_value The value to set the property to.
"""
# Check if the property exists.
if not (property_name in self.properties):
print(" unknown property name: %s"%property_name)
return False
# If the value is text, figure out what the
# corresponding numerical property value is.
if (type(property_value) == type("")):
text_values = self.getPropertyText(property_name)
if (property_value in text_values):
property_value = float(text_values[property_value])
else:
print(" unknown property text value: %s for %s"%(property_value, property_name))
return False
# Check that the property is within range.
[pv_min, pv_max] = self.getPropertyRange(property_name)
if (property_value < pv_min):
print(" set property value %s is less than minimum of %s %s setting to minimum"%(property_value,pv_min,property_name))
property_value = pv_min
if (property_value > pv_max):
print( " set property value %s is greater than maximum of %s %s setting to maximum"%(property_value,pv_min,property_name))
property_value = pv_max
# Set the property value, return what it was set too.
prop_id = self.properties[property_name]
p_value = ctypes.c_double(property_value)
self.checkStatus(self.dcam.dcam_setgetpropertyvalue(self.camera_handle,
ctypes.c_int32(prop_id),
ctypes.byref(p_value),
ctypes.c_int32(DCAM_DEFAULT_ARG)),
"dcam_setgetpropertyvalue")
return p_value.value
def setSubArrayMode(self):
"""This sets the sub-array mode as appropriate based on the current ROI."""
# Check ROI properties.
roi_w = self.getPropertyValue("subarray_hsize")[0]
roi_h = self.getPropertyValue("subarray_vsize")[0]
# If the ROI is smaller than the entire frame turn on subarray mode
if roi_w == self.max_width and roi_h == self.max_height:
self.setPropertyValue("subarray_mode", 1) # OFF
else:
self.setPropertyValue("subarray_mode", 2) # ON
def startAcquisition(self):
""" Start data acquisition."""
self.captureSetup()
# Allocate Hamamatsu image buffers.
# We allocate enough to buffer 2 seconds of data.
n_buffers = int(2.0*self.getPropertyValue("internal_frame_rate")[0])
self.number_image_buffers = n_buffers
self.checkStatus(self.dcam.dcam_allocframe(self.camera_handle,
ctypes.c_int32(self.number_image_buffers)),
"dcam_allocframe")
# Start acquisition.
self.checkStatus(self.dcam.dcam_capture(self.camera_handle),
"dcam_capture")
def stopAcquisition(self):
"""Stop data acquisition."""
self.checkStatus(self.dcam.dcam_idle(self.camera_handle),
"dcam_idle")
print("max camera backlog was %s of %s"%(self.max_backlog, self.number_image_buffers))
self.max_backlog = 0
# Free image buffers.
self.number_image_buffers = 0
self.checkStatus(self.dcam.dcam_freeframe(self.camera_handle),
"dcam_freeframe")
def shutdown(self):
"""Close down the connection to the camera."""
self.checkStatus(self.dcam.dcam_close(self.camera_handle),
"dcam_close")
class HamamatsuCameraMR(HamamatsuCamera):
"""# Memory recycling camera class.
This version allocates "user memory" for the Hamamatsu camera
buffers. This memory is also the location of the storage for
the np_array element of a HCamData() class. The memory is
allocated once at the beginning, then recycled. This means
that there is a lot less memory allocation & shuffling compared
to the basic class, which performs one allocation and (I believe)
two copies for each frame that is acquired.
WARNING: There is the potential here for chaos. Since the memory
is now shared there is the possibility that downstream code
will try and access the same bit of memory at the same time
as the camera and this could end badly.
FIXME: Use lockbits (and unlockbits) to avoid memory clashes?
This would probably also involve some kind of reference counting
scheme."""
def __init__(self, camera_id):
"""@param camera_id The id of the camera."""
HamamatsuCamera.__init__(self, camera_id)
self.hcam_data = []
self.hcam_ptr = False
self.old_frame_bytes = -1
self.setPropertyValue("output_trigger_kind[0]", 2)
def getFrames(self):
"""Gets all of the available frames.
        This will block waiting for new frames even if there are new frames
        available when it is called.
FIXME: It does not always seem to block? The length of frames can
be zero. Are frames getting dropped? Some sort of race condition?
return [frames, [frame x size, frame y size]]
"""
frames = []
for n in self.newFrames():
frames.append(self.hcam_data[n])
return [frames, [self.frame_x, self.frame_y]]
def startAcquisition(self):
"""Allocate as many frames as will fit in 2GB of memory and start data acquisition."""
self.captureSetup()
# Allocate new image buffers if necessary.
        # Allocate as many frames as can fit in roughly 0.1 GB of memory.
if (self.old_frame_bytes != self.frame_bytes):
n_buffers = int((0.1 * 1024 * 1024 * 1024)/self.frame_bytes)
self.number_image_buffers = n_buffers
# Allocate new image buffers.
ptr_array = ctypes.c_void_p * self.number_image_buffers
self.hcam_ptr = ptr_array()
self.hcam_data = []
for i in range(self.number_image_buffers):
hc_data = HCamData(self.frame_bytes)
self.hcam_ptr[i] = hc_data.getDataPtr()
self.hcam_data.append(hc_data)
self.old_frame_bytes = self.frame_bytes
# Attach image buffers.
#
# We need to attach & release for each acquisition otherwise
# we'll get an error if we try to change the ROI in any way
# between acquisitions.
        self.checkStatus(self.dcam.dcam_attachbuffer(self.camera_handle,
                                                     self.hcam_ptr,
                                                     ctypes.sizeof(self.hcam_ptr)),
                         "dcam_attachbuffer")
        # Start acquisition.
        self.checkStatus(self.dcam.dcam_capture(self.camera_handle),
                         "dcam_capture")
def stopAcquisition(self):
"""Stops the acquisition and releases the memory associated with the frames."""
# Stop acquisition.
        self.checkStatus(self.dcam.dcam_idle(self.camera_handle),
                         "dcam_idle")
        # Release image buffers.
        if (self.hcam_ptr):
            self.checkStatus(self.dcam.dcam_releasebuffer(self.camera_handle),
                             "dcam_releasebuffer")
print("max camera backlog was: %s"%self.max_backlog)
self.max_backlog = 0
#
# Testing.
#
if __name__ == "__main__":
    print('MAIN')
    # Initialize the DCAM API to determine how many cameras are connected
    # (mirrors the module-level initialization that is commented out above).
    dcam = ctypes.windll.dcamapi
    temp = ctypes.c_int32(0)
    if (dcam.dcam_init(None, ctypes.byref(temp), None) != DCAMERR_NOERROR):
        raise DCAMException("DCAM initialization failed.")
    n_cameras = temp.value
    print("found: %s cameras" % n_cameras)
if (n_cameras > 0):
hcam = HamamatsuCamera(0)
print(hcam.setPropertyValue("defect_correct_mode", 1))
print("camera 0 model:", hcam.getModelInfo(0))
# List support properties.
if 0:
print("Supported properties:")
props = hcam.getProperties()
for i, id_name in enumerate(sorted(props.keys())):
[p_value, p_type] = hcam.getPropertyValue(id_name)
p_rw = hcam.getPropertyRW(id_name)
read_write = ""
if (p_rw[0]):
read_write += "read"
if (p_rw[1]):
read_write += ", write"
print(" %s)%s = %s type is:%s,%s"%(i,id_name,p_value,p_type,read_write))
text_values = hcam.getPropertyText(id_name)
if (len(text_values) > 0):
print(" option / value")
for key in sorted(text_values, key = text_values.get):
print(" %s/%s"%(key,text_values[key]))
# Test setting & getting some parameters.
if 1:
print(hcam.setPropertyValue("exposure_time", 0.001))
#print(hcam.setPropertyValue("subarray_hsize", 2048))
#print(hcam.setPropertyValue("subarray_vsize", 2048))
print(hcam.setPropertyValue("subarray_hpos", 512))
print(hcam.setPropertyValue("subarray_vpos", 512))
print(hcam.setPropertyValue("subarray_hsize", 1024))
print(hcam.setPropertyValue("subarray_vsize", 1024))
print(hcam.setPropertyValue("binning", "1x1"))
print(hcam.setPropertyValue("readout_speed", 2))
hcam.setSubArrayMode()
hcam.startAcquisition()
hcam.stopAcquisition()
params = ["internal_frame_rate",
"timing_readout_time",
"exposure_time",
"image_height",
"image_width",
"image_framebytes",
#"buffer_framebytes",
#"buffer_rowbytes",
#"buffer_top_offset_bytes",
"subarray_hsize",
"subarray_vsize",
"binning"]
for param in params:
print(param, hcam.getPropertyValue(param)[0])
# Test acquisition.
if 0:
import numpy as np
import matplotlib.pyplot as plt
hcam.startAcquisition()
cnt = 1
for i in range(300):
[frames, dims] = hcam.getFrames()
for aframe in frames:
print(cnt, aframe[0:5])
cnt += 1
hcam.stopAcquisition()
plt.imshow(aframe)
plt.show()
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
mit
|
kaichogami/sympy
|
sympy/plotting/plot_implicit.py
|
83
|
14400
|
"""Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to specify to use the fall back algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality  # If the expression has equality, i.e.
#Eq, GreaterThan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#Recursively call refine_pixels, which subdivides the intervals that are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Unpack the x and y intervals for this candidate region
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#Discard the interval if the expression is nowhere satisfied; keep it if
#it is satisfied throughout; otherwise subdivide it further.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 300.
- ``title`` string. The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it falls back to
generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
interval arithmetic fails to plot with a small line width.
Examples
========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
line_color = kwargs.pop('line_color', "blue")
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points, line_color)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
|
bsd-3-clause
|
kjung/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
27
|
10521
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
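# Condensed sketch of the approach demonstrated below (illustrative only;
# data_train and data_test refer to objects defined later in this script):
#
#   vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
#                                stop_words='english')
#   X_train = vectorizer.fit_transform(data_train.data)  # sparse tf-idf matrix
#   clf = MultinomialNB(alpha=.01).fit(X_train, data_train.target)
#   clf.score(vectorizer.transform(data_test.data), data_test.target)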
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.21/_downloads/6f20d729433c52851fc6a16e7531cf37/plot_compute_csd.py
|
20
|
3996
|
"""
==================================================
Compute a cross-spectral density (CSD) matrix
==================================================
A cross-spectral density (CSD) matrix is similar to a covariance matrix, but in
the time-frequency domain. It is the first step towards computing
sensor-to-sensor coherence or a DICS beamformer.
This script demonstrates the three methods that MNE-Python provides to compute
the CSD:
1. Using short-term Fourier transform: :func:`mne.time_frequency.csd_fourier`
2. Using a multitaper approach: :func:`mne.time_frequency.csd_multitaper`
3. Using Morlet wavelets: :func:`mne.time_frequency.csd_morlet`
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import csd_fourier, csd_multitaper, csd_morlet
print(__doc__)
###############################################################################
# In the following example, the computation of the CSD matrices can be
# performed using multiple cores. Set ``n_jobs`` to a value >1 to select the
# number of cores to use.
n_jobs = 1
###############################################################################
# Loading the sample dataset.
data_path = sample.data_path()
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
###############################################################################
# By default, CSD matrices are computed using all MEG/EEG channels. When
# interpreting a CSD matrix with mixed sensor types, be aware that the
# measurement units, and thus the scalings, differ across sensors. In this
# example, for speed and clarity, we select a single channel type:
# gradiometers.
picks = mne.pick_types(raw.info, meg='grad')
# Make some epochs, based on events with trigger code 1
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=1,
picks=picks, baseline=(None, 0),
reject=dict(grad=4000e-13), preload=True)
###############################################################################
# Computing CSD matrices using short-term Fourier transform and (adaptive)
# multitapers is straightforward:
csd_fft = csd_fourier(epochs, fmin=15, fmax=20, n_jobs=n_jobs)
csd_mt = csd_multitaper(epochs, fmin=15, fmax=20, adaptive=True, n_jobs=n_jobs)
###############################################################################
# When computing the CSD with Morlet wavelets, you specify the exact
# frequencies at which to compute it. For each frequency, a corresponding
# wavelet will be constructed and convolved with the signal, resulting in a
# time-frequency decomposition.
#
# The CSD is constructed by computing the correlation between the
# time-frequency representations between all sensor-to-sensor pairs. The
# time-frequency decomposition originally has the same sampling rate as the
# signal, in our case ~600Hz. This means the decomposition is over-specified in
# time and we may not need to use all samples during our CSD computation, just
# enough to get a reliable correlation statistic. By specifying ``decim=10``,
# we use every 10th sample, which will greatly speed up the computation and
# will have a minimal effect on the CSD.
frequencies = [16, 17, 18, 19, 20]
csd_wav = csd_morlet(epochs, frequencies, decim=10, n_jobs=n_jobs)
###############################################################################
# The resulting :class:`mne.time_frequency.CrossSpectralDensity` objects have a
# plotting function we can use to compare the results of the different methods.
# We're plotting the mean CSD across frequencies.
csd_fft.mean().plot()
plt.suptitle('short-term Fourier transform')
csd_mt.mean().plot()
plt.suptitle('adaptive multitapers')
csd_wav.mean().plot()
plt.suptitle('Morlet wavelet transform')
|
bsd-3-clause
|
espenhgn/nest-simulator
|
doc/guides/spatial/user_manual_scripts/connections.py
|
5
|
20252
|
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(7654321)
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
ctr = l.spatial['center']
ext = l.spatial['extent']
if xticks is None:
if 'shape' in l.spatial:
dx = float(ext[0]) / l.spatial['shape'][0]
dy = float(ext[1]) / l.spatial['shape'][1]
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
l.spatial['shape'][0])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
l.spatial['shape'][1])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_axisbelow(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def conn_figure(fig, layer, connd, targets=None, showmask=True, kern=None,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
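# Plot the layer and, for each (source, color) pair in targets, the targets
# of that source node; optionally overlay the connection mask and the
# probability parameter used in connd.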
if targets is None:
targets = ((nest.FindCenterElement(layer), 'red'),)
nest.PlotLayer(layer, fig=fig, nodesize=60)
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
nest.PlotTargets(src, layer, fig=fig, mask=mask, probability_parameter=kern,
src_size=250, tgt_color=clr, tgt_size=20, mask_color='red',
probability_cmap='Greens')
beautify_layer(layer, fig,
xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
xlabel='', ylabel='')
fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
#{ conn1 #}
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
nest.Connect(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
targets=((nest.FindCenterElement(l), 'red'),
(nest.FindNearestElement(l, [4., 5.])[0], 'yellow')))
# same again, this time with periodic boundary conditions
lpbc = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[11, 11], extent=[11., 11.],
edge_wrap=True))
nest.Connect(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
targets=((nest.FindCenterElement(lpbc), 'red'),
(nest.FindNearestElement(lpbc, [4., 5.])[0], 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))
nest.Connect(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn2r #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
#{ end #}
free_mask_fig(fig, 221, conndict)
#{ conn2c #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 222, conndict)
#{ conn2d #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 223, conndict)
#{ conn2e #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'elliptical': {'major_axis': 7.,
'minor_axis': 4.}}}
#{ end #}
free_mask_fig(fig, 224, conndict)
plt.savefig('../user_manual_figures/conn2_a.png', bbox_inches='tight')
#-----------------------------------------------------------------------------#
fig = plt.figure()
#{ conn2ro #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]},
'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 221, conndict)
#{ conn2co #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0, 0.0]}}
#{ end #}
free_mask_fig(fig, 222, conndict)
#{ conn2do #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5, 1.5]}}
#{ end #}
free_mask_fig(fig, 223, conndict)
#{ conn2eo #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'elliptical': {'major_axis': 7.,
'minor_axis': 4.},
'anchor': [2.0, -1.0]}}
#{ end #}
free_mask_fig(fig, 224, conndict)
plt.savefig('../user_manual_figures/conn2_b.png', bbox_inches='tight')
#-----------------------------------------------------------------------------#
fig = plt.figure()
#{ conn2rr #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.],
'azimuth_angle': 120.}}}
#{ end #}
free_mask_fig(fig, 121, conndict)
#{ conn2er #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'elliptical': {'major_axis': 7.,
'minor_axis': 4.,
'azimuth_angle': 45.}}}
#{ end #}
free_mask_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn2_c.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
if targets is None:
targets = ((nest.FindCenterElement(layer), 'red'),)
nest.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
nest.PlotTargets(src, layer, fig=fig, mask=mask, probability_parameter=None,
src_size=250, tgt_color=clr, tgt_size=60,
probability_cmap='Greens')
ax = fig.gca()
# ax.set_aspect('equal', 'box')
plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[11, 11, 11],
extent=[11., 11., 11.]))
nest.Connect(l, l, cdict)
fig.add_subplot(loc, projection='3d')
conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'box': {'lower_left': [-2., -1., -1.],
'upper_right': [2., 1., 1.]}}}
#{ end #}
# free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
# free_mask_3d_fig(fig, 122, conndict)
#{ conn_3d_c #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'ellipsoidal': {'major_axis': 7.,
'minor_axis': 4.,
'polar_axis': 4.5}}}
#{ end #}
# plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[11, 11],
extent=[11., 11.]))
nest.Connect(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
showmask=False)
fig = plt.figure()
#{ conn3 #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'grid': {'shape': [5, 3]}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'grid': {'shape': [5, 3]},
'anchor': [2, 1]}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'grid': {'shape': [3, 5]},
'anchor': [2, -1]}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# probability kernels
def kernel_fig(fig, loc, cdict, kern=None):
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(
shape=[11, 11],
extent=[11., 11.]))
nest.Connect(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
kern=kern)
fig = plt.figure()
#{ conn4cp #}
conndict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 4.}}}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'rule': 'pairwise_bernoulli',
'p': nest.spatial_distributions.gaussian(nest.spatial.distance, std=1.0),
'mask': {'circular': {'radius': 4.}}}
#{ end #}
kernel_fig(fig, 232, conndict, kern=nest.spatial_distributions.gaussian(nest.spatial.distance, std=1.0))
#{ conn4cut #}
distribution = nest.spatial_distributions.gaussian(nest.spatial.distance, std=1.0)
conndict = {'rule': 'pairwise_bernoulli',
'p': nest.logic.conditional(distribution > 0.5,
distribution,
0),
'mask': {'circular': {'radius': 4.}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'rule': 'pairwise_bernoulli',
'p': nest.spatial_distributions.gaussian2D(nest.spatial.distance.x,
nest.spatial.distance.y,
std_x=1.,
std_y=3.),
'mask': {'circular': {'radius': 4.}}}
#{ end #}
kernel_fig(fig, 235, conndict)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
def wd_fig(fig, loc, pos, cdict, sdict, what, rpos=None,
xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
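# Build a layer from pos, connect it with cdict/sdict, and plot the requested
# connection property ('weight' or 'delay') of all connections from a
# reference node against the x-position of the targets.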
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha', positions=pos)
nest.Connect(l, l, cdict, sdict)
ax = fig.add_subplot(loc)
if rpos is None:
rn = l[0] # first node
else:
rn = nest.FindNearestElement(l, rpos)
conns = nest.GetConnections(rn)
vals = np.array([c.get(what) for c in conns])
tgts = [c.get('target') for c in conns]
locs = np.array([nest.GetPosition(l[l.index(t)]) for t in tgts])
ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
fig = plt.figure()
#{ conn5lin #}
pos = nest.spatial.grid(shape=[51, 1], extent=[51., 1.], center=[25., 0.])
spatial_nodes = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.math.max(1.0 - 0.05 * nest.spatial.distance, 0.),
'delay': 0.1 + 0.02 * nest.spatial.distance}
nest.Connect(spatial_nodes, spatial_nodes, cdict, sdict)
#{ end #}
wd_fig(fig, 311, pos, cdict, sdict, 'weight', label='Weight')
wd_fig(fig, 311, pos, cdict, sdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
ppos = nest.spatial.grid(shape=[51, 1],
extent=[51., 1.],
center=[25., 0.],
edge_wrap=True)
#{ conn5linpbc #}
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.math.max(1.0 - 0.05 * nest.spatial.distance, 0.),
'delay': 0.1 + 0.02 * nest.spatial.distance}
#{ end #}
wd_fig(fig, 312, ppos, cdict, sdict, 'weight', label='Weight')
wd_fig(fig, 312, ppos, cdict, sdict, 'delay', label='Delay', clr='red')
fig.gca().legend(loc=1)
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.math.max(1.0 - 0.05 * nest.spatial.distance, 0.)}
wd_fig(fig, 313, pos, cdict, sdict, 'weight', label='Linear',
rpos=[25., 0.], clr='orange')
#{ conn5exp #}
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.spatial_distributions.exponential(nest.spatial.distance, beta=5.)}
#{ end #}
wd_fig(fig, 313, pos, cdict, sdict, 'weight', label='Exponential',
rpos=[25., 0.])
#{ conn5gauss #}
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.spatial_distributions.gaussian(nest.spatial.distance, std=5.)}
#{ end #}
wd_fig(fig, 313, pos, cdict, sdict, 'weight', label='Gaussian', clr='green',
rpos=[25., 0.])
#{ conn5uniform #}
cdict = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}}}
sdict = {'weight': nest.random.uniform(min=0.2, max=0.8)}
#{ end #}
wd_fig(fig, 313, pos, cdict, sdict, 'weight', label='Uniform', clr='red',
rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
#{ conn_param_design #}
parameter = 0.5 + nest.spatial.distance.x + 2. * nest.spatial.distance.y
#{ end #}
#{ conn_param_design_ex #}
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[11, 11],
extent=[1., 1.]))
nest.Connect(l, l, {'rule': 'pairwise_bernoulli',
'p': parameter,
'mask': {'circular': {'radius': 0.5}}})
#{ end #}
# --------------------------------
def pn_fig(fig, loc, l, cdict,
xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
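# Connect the layer with cdict and plot a histogram of source-target
# distances; the red curve is the expected distance distribution for the
# linearly decaying connection probability used in the example below.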
nest.Connect(l, l, cdict)
ax = fig.add_subplot(loc)
conns = nest.GetConnections(l)
dist = np.array([nest.Distance(l[l.index(s)],
l[l.index(t)])
for s, t in zip(conns.sources(), conns.targets())])
ax.hist(dist, bins=50, histtype='stepfilled', density=True)
r = np.arange(0., 0.51, 0.01)
plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
zorder=-10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
"""ax.set_xticks(xticks)
ax.set_yticks(yticks)"""
# ax.set_aspect(100, 'box')
ax.set_xlabel('Source-target distance d')
ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
nest.ResetKernel()
#{ conn6 #}
pos = nest.spatial.free(nest.random.uniform(-1., 1.),
extent=[2., 2.], edge_wrap=True)
l = nest.Create('iaf_psc_alpha', 1000, positions=pos)
cdict = {'rule': 'fixed_outdegree',
'p': nest.math.max(1. - 2 * nest.spatial.distance, 0.),
'mask': {'circular': {'radius': 1.0}},
'outdegree': 50,
'allow_multapses': True, 'allow_autapses': False}
nest.Connect(l, l, cdict)
#{ end #}
pn_fig(fig, 111, l, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
pos = nest.spatial.grid(shape=[10, 10])
l_ex = nest.Create('iaf_psc_alpha', positions=pos)
l_in = nest.Create('iaf_psc_alpha', positions=pos)
nest.Connect(l_ex, l_in, {'rule': 'pairwise_bernoulli',
'p': 0.8,
'mask': {'circular': {'radius': 0.5}}},
{'synapse_model': 'exc'})
nest.Connect(l_in, l_ex, {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
'upper_right': [0.2, 0.2]}}},
{'synapse_model': 'inh'})
#{ end #}
# ----------------------------
#{ conn9 #}
nrn_layer = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[20, 20]))
stim = nest.Create('poisson_generator',
positions=nest.spatial.grid(shape=[1, 1]))
cdict_stim = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'mask': {'circular': {'radius': 0.1},
'anchor': [0.2, 0.2]}}
nest.Connect(stim, nrn_layer, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = nest.Create('spike_detector',
positions=nest.spatial.grid(shape=[1, 1]))
cdict_rec = {'rule': 'pairwise_bernoulli',
'p': 1.0,
'use_on_source': True,
'mask': {'circular': {'radius': 0.1},
'anchor': [-0.2, 0.2]}}
nest.Connect(nrn_layer, rec, cdict_rec)
#{ end #}
# ----------------------------
#{ conn11 #}
rec = nest.Create('spike_detector')
nest.Connect(nrn_layer, rec)
#{ end #}
|
gpl-2.0
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/tests/test_backend_svg.py
|
5
|
5466
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from io import BytesIO
import xml.parsers.expat
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib
needs_tex = knownfailureif(
not matplotlib.checkdep_tex(),
"This test needs a TeX installation")
@cleanup
def test_visibility():
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(0, 4 * np.pi, 50)
y = np.sin(x)
yerr = np.ones_like(y)
a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')
for artist in b:
artist.set_visible(False)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read()
fd.close()
parser = xml.parsers.expat.ParserCreate()
parser.Parse(buf) # this will raise ExpatError if the svg is invalid
@image_comparison(baseline_images=['fill_black_with_alpha'], remove_text=True,
extensions=['svg'])
def test_fill_black_with_alpha():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x=[0, 0.1, 1], y=[0, 0, 0], c='k', alpha=0.1, s=10000)
@image_comparison(baseline_images=['noscale'], remove_text=True)
def test_noscale():
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(Z, cmap='gray', interpolation='none')
@cleanup
def test_composite_images():
#Test that figures can be saved with and without combining multiple images
#(on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with BytesIO() as svg:
fig.savefig(svg, format="svg")
svg.seek(0)
buff = svg.read()
assert buff.count(six.b('<image ')) == 1
plt.rcParams['image.composite_image'] = False
with BytesIO() as svg:
fig.savefig(svg, format="svg")
svg.seek(0)
buff = svg.read()
assert buff.count(six.b('<image ')) == 2
@cleanup
def test_text_urls():
fig = plt.figure()
test_url = "http://test_text_urls.matplotlib.org"
fig.suptitle("test_text_urls", url=test_url)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read().decode()
fd.close()
expected = '<a xlink:href="{0}">'.format(test_url)
assert expected in buf
@image_comparison(baseline_images=['bold_font_output'], extensions=['svg'])
def test_bold_font_output():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
@image_comparison(baseline_images=['bold_font_output_with_none_fonttype'],
extensions=['svg'])
def test_bold_font_output_with_none_fonttype():
plt.rcParams['svg.fonttype'] = 'none'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
def _test_determinism_save(filename, usetex):
# This function is mostly copy&paste from "def test_visibility"
# To require no GUI, we use Figure and FigureCanvasSVG
# instead of plt.figure and fig.savefig
from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib import rc
rc('svg', hashsalt='asdf')
rc('text', usetex=usetex)
fig = Figure()
ax = fig.add_subplot(111)
x = np.linspace(0, 4 * np.pi, 50)
y = np.sin(x)
yerr = np.ones_like(y)
a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')
for artist in b:
artist.set_visible(False)
ax.set_title('A string $1+2+\sigma$')
ax.set_xlabel('A string $1+2+\sigma$')
ax.set_ylabel('A string $1+2+\sigma$')
FigureCanvasSVG(fig).print_svg(filename)
def _test_determinism(filename, usetex):
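# Render the same figure three times in separate interpreter runs (with hash
# randomization enabled via -R) and assert that the resulting SVG output is
# byte-for-byte identical.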
import os
import sys
from subprocess import check_call
from nose.tools import assert_equal
plots = []
for i in range(3):
check_call([sys.executable, '-R', '-c',
'import matplotlib; '
'matplotlib.use("svg"); '
'from matplotlib.tests.test_backend_svg '
'import _test_determinism_save;'
'_test_determinism_save(%r, %r)' % (filename, usetex)])
with open(filename, 'rb') as fd:
plots.append(fd.read())
os.unlink(filename)
for p in plots[1:]:
assert_equal(p, plots[0])
@cleanup
def test_determinism_notex():
# unique filename to allow for parallel testing
_test_determinism('determinism_notex.svg', usetex=False)
@cleanup
@needs_tex
def test_determinism_tex():
# unique filename to allow for parallel testing
_test_determinism('determinism_tex.svg', usetex=True)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
gpl-3.0
|
zfrenchee/pandas
|
pandas/core/apply.py
|
1
|
9002
|
import numpy as np
from pandas import compat
from pandas._libs import lib
from pandas.core.dtypes.common import (
is_extension_type,
is_sequence)
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=False,
raw=False, reduce=None, args=(), **kwds):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, args=args, kwds=kwds)
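# Rough usage sketch (illustrative only; DataFrame.apply normally constructs
# this object internally, and df stands for any DataFrame):
#
#   op = frame_apply(df, func=np.sqrt, axis=0)
#   result = op.get_result()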
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, args, kwds):
self.obj = obj
self.broadcast = broadcast
self.raw = raw
self.reduce = reduce
self.args = args
self.ignore_failures = kwds.pop('ignore_failures', False)
self.kwds = kwds
# curry if needed
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@property
def values(self):
return self.obj.values
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, compat.string_types):
if self.axis:
self.kwds['axis'] = self.axis
return getattr(self.obj, self.f)(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.f(self.values)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.broadcast:
return self.apply_broadcast()
# one axis empty
if not all(self.obj.shape):
return self.apply_empty_result()
# raw
if self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
from pandas import Series
reduce = self.reduce
if reduce is None:
reduce = False
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return Series(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
try:
result = lib.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
from pandas import DataFrame, Series
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self.agg_axis)
def apply_standard(self):
from pandas import Series
reduce = self.reduce
if reduce is None:
reduce = True
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self.obj._get_axis(self.axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
labels = self.agg_axis
result = lib.reduce(values, self.f,
axis=self.axis,
dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
# compute the result using the series generator
results, res_index, res_columns = self._apply_series_generator()
# wrap results
return self.wrap_results(results, res_index, res_columns)
def _apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
res_columns = self.result_columns
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
return results, res_index, res_columns
def wrap_results(self, results, res_index, res_columns):
from pandas import Series
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self.obj._constructor(data=results, index=index)
result.columns = res_index
if self.axis == 1:
result = result.T
result = result._convert(
datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, target):
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = self.f(target[col])
result = self.obj._constructor(result_values, index=target.index,
columns=target.columns)
return result
class FrameRowApply(FrameApply):
axis = 0
def get_result(self):
# dispatch to agg
if isinstance(self.f, (list, dict)):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
return super(FrameRowApply, self).get_result()
def apply_broadcast(self):
return self._apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1)
for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
class FrameColumnApply(FrameApply):
axis = 1
def __init__(self, obj, func, broadcast, raw, reduce, args, kwds):
super(FrameColumnApply, self).__init__(obj, func, broadcast,
raw, reduce, args, kwds)
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if self.reduce:
if self.obj._is_mixed_type and self.obj._is_datelike_mixed_type:
self.reduce = False
def apply_broadcast(self):
return self._apply_broadcast(self.obj.T).T
@property
def series_generator(self):
from pandas import Series
dtype = object if self.obj._is_mixed_type else None
return (Series._from_array(arr, index=self.columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(self.values,
self.index)))
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
|
bsd-3-clause
|
stack-of-tasks/sot-stabilizer
|
python/scripts/appli_two_hands_compensater.py
|
2
|
1608
|
# Launch it with py ../robotViewerLauncher.py +compensater.py +appli.py
import sys
import numpy as np
import matplotlib.pyplot as pl
import dynamic_graph as dg
import dynamic_graph.signal_base as dgsb
from math import sin
from dynamic_graph.sot.application.stabilizer.compensater import *
appli = HandCompensater(robot)
appli.withTraces()
if 'usingRobotViewer' in locals() and usingRobotViewer: refreshList.append(lambda: appli.updateDisplay()); appli.initDisplay(); go()
# --- SHORTCUTS
rm = appli.rm
push = appli.push
dyn = appli.robot.dynamic
sot = appli.sot
taskCom = appli.taskCom
taskChest = appli.taskChest
taskPosture = appli.taskPosture
taskRH = appli.taskRH
tr = appli.robot.tracer
t = optionalparentheses(appli.dumpTracer)
a = appli
s = optionalparentheses(appli.nextStep)
#### Flexibility Estimator ##
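# Build the IMU-based flexibility estimator and plug its inverse flexibility
# matrix and inverse flexibility velocity into the compensator inputs
# ccMc and ccVc.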
est= sotso.DGIMUFlexibilityEstimation('flextimator')
est.setSamplingPeriod(0.005)
meas = est.signal('measurement')
inputs = est.signal('input')
contactNbr = est.signal('contactNbr')
contactNbr.value = 1
contact1 = est.signal('contact1')
contact1.value = (0,0,0);
flex=est.signal('flexMatrixInverse')
flexdot = est.signal('flexInverseVelocityVector')
plug(flex,appli.ccMc)
plug(flexdot,appli.ccVc)
#appli.robot.addTrace( est.name,'flexInverseVelocityVector' )
#appli.robot.addTrace( est.name,'flexibility' )
#appli.robot.addTrace( est.name , 'simulatedSensors' )
meas.value = (0.0 , 0.0, 9.81 , 0.0 , 0.0 , 0.0)
inputs.value = (0.0, 0.0, 1.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
#stop()
|
lgpl-3.0
|
markr622/moose
|
framework/scripts/memory_logger.py
|
20
|
43940
|
#!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform
class LLDB:
def __init__(self):
self.debugger = lldb.SBDebugger.Create()
self.command_interpreter = self.debugger.GetCommandInterpreter()
self.target = self.debugger.CreateTargetWithFileAndArch(None, None)
self.listener = lldb.SBListener("event_listener")
self.error = lldb.SBError()
def __del__(self):
lldb.SBDebugger.Destroy(self.debugger)
def _parseStackTrace(self, gibberish):
return gibberish
def _run_commands(self, commands):
tmp_text = ''
return_obj = lldb.SBCommandReturnObject()
for command in commands:
self.command_interpreter.HandleCommand(command, return_obj)
if return_obj.Succeeded():
if command == 'process status':
tmp_text += '\n########################################################\n## Process Status:\n##\n'
tmp_text += return_obj.GetOutput()
elif command == 'bt':
tmp_text += '\n########################################################\n## Backtrace:\n##\n'
tmp_text += return_obj.GetOutput()
return tmp_text
def getStackTrace(self, pid):
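"""Attach to the given pid with LLDB, interrupt the process, collect its
process status and backtrace, then detach and return the captured text."""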
event = lldb.SBEvent()
lldb_results = ''
state = 0
attach_info = lldb.SBAttachInfo(int(pid))
process = self.target.Attach(attach_info, self.error)
process.GetBroadcaster().AddListener(self.listener, lldb.SBProcess.eBroadcastBitStateChanged)
done = False
while not done:
if self.listener.WaitForEvent(lldb.UINT32_MAX, event):
state = lldb.SBProcess.GetStateFromEvent(event)
if state == lldb.eStateExited:
done = True
elif state == lldb.eStateStopped:
lldb_results = self._run_commands(['process status', 'bt', 'cont'])
done = True
elif state == lldb.eStateRunning:
self._run_commands(['process interrupt'])
if state == lldb.eStateCrashed or state == lldb.eStateInvalid or state == lldb.eStateExited:
return 'Binary exited before sample could be taken'
time.sleep(0.03)
# Due to some strange race condition we have to wait until eState is running
# before we can pass the 'detach, quit' command. Why we can not do this all in
# one go... bug?
done = False
while not done:
if self.listener.WaitForEvent(lldb.UINT32_MAX, event):
state = lldb.SBProcess.GetStateFromEvent(event)
if state == lldb.eStateRunning:
self._run_commands(['detach', 'quit'])
done = True
if state == lldb.eStateCrashed or state == lldb.eStateInvalid or state == lldb.eStateExited:
return 'Binary exited before sample could be taken'
time.sleep(0.03)
return self._parseStackTrace(lldb_results)
class GDB:
def _parseStackTrace(self, gibberish):
not_gibberish = re.findall(r'\(gdb\) (#.*)\(gdb\)', gibberish, re.DOTALL)
if len(not_gibberish) != 0:
return not_gibberish[0]
else:
return 'Stack Trace failed: ' + gibberish
def _waitForResponse(self, wait=True):
while wait:
self.gdb_stdout.seek(self.last_position)
for line in self.gdb_stdout:
if line == '(gdb) ':
self.last_position = self.gdb_stdout.tell()
return True
time.sleep(0.05)
time.sleep(0.05)
return True
def getStackTrace(self, pid):
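"""Attach gdb to the given pid, run a scripted thread/backtrace sequence,
and return the parsed stack trace."""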
gdb_commands = [ 'attach ' + pid + '\n', 'set verbose off\n', 'thread\n', 'apply\n', 'all\n', 'bt\n', 'quit\n', 'y\n' ]
self.gdb_stdout = SpooledTemporaryFile()
self.last_position = 0
gdb_process = subprocess.Popen([which('gdb'), '-nx'], stdin=subprocess.PIPE, stdout=self.gdb_stdout, stderr=self.gdb_stdout)
while gdb_process.poll() == None:
for command in gdb_commands:
if command == gdb_commands[-1]:
gdb_commands = []
elif self._waitForResponse():
# I have seen GDB exit out from under us
try:
gdb_process.stdin.write(command)
except:
pass
self.gdb_stdout.seek(0)
stack_trace = self._parseStackTrace(self.gdb_stdout.read())
self.gdb_stdout.close()
return stack_trace
class Server:
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
# Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# If all agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, '\nNodes:', ', '.join(nodes), '\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
+ 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \
+ os.path.abspath(__file__) \
+ ' --call-back-host ' \
+ self.host + ' ' + str(self.port) \
+ '"'])
# remote into each node and execute another copy of memory_logger.py
# with a call-back argument to receive further instructions
for pbs_node in command:
subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the binary we intend to track
def _launchJob(self):
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log)
# A connection has been made from client to server
# Capture that data, and determine what to do with it
def handleAgent(self):
# Loop through all client connections, and receive data if any
for agent_socket in self.client_connections:
# Completely ignore the server_socket object
if agent_socket == self.server_socket:
continue
# Assign an AgentConnector for the task of handling data between client and server
reporting_agent = AgentConnector(self.arguments, agent_socket)
# OK... get data from a client and begin
new_data = reporting_agent.readData()
if new_data != None:
# There should be only one dictionary key (we're reading data from just one client at a time)
agent_uuid = new_data.keys()[0]
# Update our dictionary of an agents data
self.agent.agent_data[agent_uuid] = new_data[agent_uuid]
# Modify the incoming Agent's timestamp to match the Server's time (because every node is a little bit off)
if self.arguments.recover:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta
else:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now
# update total usage for all known reporting agents
total_usage = 0
for one_agent in self.agent.agent_data.keys():
total_usage += self.agent.agent_data[one_agent]['MEMORY']
self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage)
# Get any stdout that's happened thus far and apply it to whatever agent just sent us data
self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout()
# Write to our logfile
self.logfile.write(self.agent.agent_data[agent_uuid])
# Check for any agents sending a stop command. If we find one,
# set some zeroing values, and close that agent's socket.
if self.agent.agent_data[agent_uuid]['STOP']:
self.agent.agent_data[agent_uuid]['MEMORY'] = 0
agent_socket.close()
if agent_socket != self.server_socket:
self.client_connections.remove(agent_socket)
# Go ahead and set our server agent to STOP as well.
# The server will continue recording samples from agents
self.agent.agent_data['server']['STOP'] = True
# If an Agent has made a request for instructions, handle it here
update_client = False
if new_data[agent_uuid]['REQUEST'] != None:
for request in new_data[agent_uuid]['REQUEST'].iteritems():
if new_data[agent_uuid]['REQUEST'][request[0]] == '':
update_client = True
# We only support sending arguments supplied to the server back to the agent
for request_type in dir(self.arguments):
if request[0] == str(request_type):
self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0])
# If an Agent needed additional instructions, go ahead and re-send those instructions
if update_client:
reporting_agent.sendData(self.agent.agent_data[agent_uuid])
class Client:
def __init__(self, arguments):
self.arguments = arguments
# Initialize an Agent with a UUID based on our hostname
self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname())))
# Initialize an AgentConnector
self.remote_server = AgentConnector(self.arguments)
# If client will talk to a server (PBS)
if self.arguments.call_back_host:
# We know by initializing an agent, agent_data contains the necessary message asking for further instructions
self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data)
# Apply new instructions received from server (this basically updates our arguments)
for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems():
for request_type in dir(self.arguments):
if request[0] == str(request_type):
setattr(self.arguments, request[0], request[1])
# Requests have been satisfied, set to None
self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None
# Change to the same directory as the server was when initiated (needed for PBS stuff)
os.chdir(self.arguments.cwd)
# Client will not be talking to a server, save data to a file instead
else:
# Deal with --recover
if self.arguments.recover:
# Do not overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
# Overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], True)
# Lets begin!
self.startProcess()
# This function handles the starting and stopping of the sampler process.
# We loop until an agent returns a stop command.
def startProcess(self):
AGENTS_ACTIVE = True
# If we know we are the only client, go ahead and start the process we want to track.
if self.arguments.call_back_host == None:
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log)
# Delay just a bit to keep from recording a possible zero memory usage as the binary starts up
time.sleep(self.arguments.sample_delay[0])
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to process data until an Agent reports a STOP command
while AGENTS_ACTIVE:
# Take a sample
current_data = self.my_agent.takeSample()
# Handle the data supplied by the Agent.
self._handleData(current_data)
# If an Agent reported a STOP command, go ahead and begin the shutdown phase
if current_data[current_data.keys()[0]]['STOP']:
AGENTS_ACTIVE = False
# Sleep just a bit between samples, so as not to saturate the machine
time.sleep(self.arguments.repeat_rate[-1])
# An agent reported a stop command... so let everyone know where the log was saved, and exit!
if self.arguments.call_back_host == None:
print 'Binary has exited. Wrote log:', self.arguments.outfile[0]
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
self.logfile.close()
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Everything went smooth.
sys.exit(0)
# Figure out what to do with the sampled data
def _handleData(self, data):
# Sending the sampled data to a server
if self.arguments.call_back_host:
self.remote_server.sendData(data)
# Saving the sampled data to a file
else:
# Compute the TOTAL memory usage to be how much our one agent reported
# Because we're the only client doing any work
data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY']
self.logfile.write(data[self.my_agent.my_uuid])
# If the agent has been told to stop, close the database file
if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True:
self.logfile.close()
class AgentConnector:
"""
Functions used to communicate to and from Client and Server.
Both Client and Server classes use this object.
readData()
sendData('message', socket_connection=None)
if sendData's socket_connection is None, it will create a new connection to the server
based on supplied arguments
"""
def __init__(self, arguments, connection=None):
self.arguments = arguments
self.connection = connection
self.CREATED_CONNECTION = False
# If the connection is None, meaning this object was instanced by a client,
# we must create a connection to the server first
if self.connection == None and self.arguments.call_back_host != None:
self.CREATED_CONNECTION = True
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.settimeout(15)
self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1])))
# read all data sent by an agent
def readData(self):
# Get how much data there is to receive
# The first eight bytes is our data length
data_width = int(self.connection.recv(8))
tmp_received = ''
# We need to receive precisely the amount of data the
# client is trying to send us.
while len(tmp_received) < data_width:
if data_width - len(tmp_received) > 1024:
tmp_received += self.connection.recv(1024)
else:
tmp_received += self.connection.recv(data_width - (len(tmp_received)))
# unpickle the received message
return self._unpickleMessage(tmp_received)
# send data to an agent
def sendData(self, message):
# pickle the data up, and send the message
self.connection.sendall(self._pickleMessage(message))
# If we had to create the socket (connection was none), and this client/agent is requesting
# instructions, go ahead and read the data that _better be there_ sent to us by the server.
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None:
return self.readData()
# The following two functions pickle up the data for easy socket transport
def _pickleMessage(self, message):
t = TemporaryFile()
pickle.dump(message, t)
t.seek(0)
str_msg = t.read()
str_len = len(str_msg)
message = "%-8d" % (str_len,) + str_msg
return message
def _unpickleMessage(self, message):
t = TemporaryFile()
t.write(message)
t.seek(0)
try:
return pickle.load(t)
except KeyError:
print 'Socket data was not pickled data: ', message
except:
raise
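# Wire format used by _pickleMessage/_unpickleMessage above (a reference sketch derived
# from the code itself, not an external spec): an 8-character, left-justified ASCII
# length field followed by the pickled payload. For a 123-byte pickle the bytes on the
# wire are
#   "123     " + <123 bytes of pickle data>
# and readData() first recv()s the 8-byte header, then loops until exactly that many
# payload bytes have arrived.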
class WriteCSV:
def __init__(self, logfile, overwrite):
if overwrite:
self.file_object = open(logfile, 'w', 1)
else:
self.file_object = open(logfile, 'a', 1)
csv.field_size_limit(sys.maxsize)
self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Close the logfile
def close(self):
self.file_object.close()
# Write a CSV row
def write(self, data):
formatted_string = self._formatString(data)
self.log_file.writerow(formatted_string)
# Format the CSV output
def _formatString(self, data):
# We will be saving this data in CSV format. Before we do, let's format it a bit here
format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY']
formatted_text = []
for item in format_order:
# We have to handle python's way of formatting floats to strings specially
if item == 'TIMESTAMP':
formatted_text.append('%.6f' % data[item])
else:
formatted_text.append(data[item])
return formatted_text
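# For reference: the resulting CSV row layout (the same column order assumed later by
# MemoryPlotter, columns 0-3, and ReadLog, columns 0-5) is:
#   TIMESTAMP, TOTAL, STDOUT, STACK, HOSTNAME, MEMORY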
class Agent:
"""
Each agent object contains its own sampled log data. The Agent class is responsible for
collecting and storing data. machine_id is used to identify the agent.
machine_id is supplied by the client class. This allows for multiple agents if desired
"""
def __init__(self, arguments, machine_id):
self.arguments = arguments
self.my_uuid = machine_id
self.track_process = ''
# This log object is for stdout purposes
self.log = TemporaryFile()
self.log_position = 0
# Discover if --recover is being used. If so, we need to obtain the
# timestamp of the last entry in the outfile log... a little bulky
# to do... and not a very good place to do it.
if self.arguments.recover:
if os.path.exists(self.arguments.outfile[-1]):
memory_list = []
history_file = open(self.arguments.outfile[-1], 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Get last item in list. Unfortunately, no way to do this until
# we have read the entire file...? Lucky for us, most memory log
# files are in the single digit megabytes
for row in reader:
memory_list.append(row)
history_file.close()
last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1]
self.delta = (GetTime().now - last_entry)
else:
print 'Recovery options detected, but I could not find your previous memory log file.'
sys.exit(1)
else:
self.delta = 0
# Create the dictionary to which all sampled data will be stored
# NOTE: REQUEST dictionary items are instructions (arguments) we will
# ask the server to provide (if we are running with --pbs)
# Simply add them here. We _cannot_ make the arguments match the
# server exactly; this would cause every agent launched to perform
# like a server... bad stuff
# Example: We added repeat_rate (see dictionary below). Now every
# agent would update their repeat_rate according to what the user
# supplied as an argument (--repeat_rate 0.02)
self.agent_data = { self.my_uuid :
{ 'HOSTNAME' : socket.gethostname(),
'STDOUT' : '',
'STACK' : '',
'MEMORY' : 0,
'TIMESTAMP' : GetTime().now - self.delta,
'REQUEST' : { 'run' : '',
'pstack' : '',
'repeat_rate' : '',
'cwd' : ''},
'STOP' : False,
'TOTAL' : 0,
'DEBUG_LOG' : ''
}
}
# NOTE: This is the only function that should be called in this class
def takeSample(self):
if self.arguments.pstack:
self.agent_data[self.my_uuid]['STACK'] = self._getStack()
# Always do the following
self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory()
self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout()
if self.arguments.recover:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta
else:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now
# Return the data to whom ever asked for it
return self.agent_data
def _getStdout(self):
self.log.seek(self.log_position)
output = self.log.read()
self.log_position = self.log.tell()
sys.stdout.write(output)
return output
def _getMemory(self):
tmp_pids = self._getPIDs()
memory_usage = 0
if tmp_pids != {}:
for single_pid in tmp_pids.iteritems():
memory_usage += int(single_pid[1][0])
if memory_usage == 0:
# Memory usage hit zero? Then assume the binary being tracked has exited. So let's begin doing the same.
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping'
self.agent_data[self.my_uuid]['STOP'] = True
return 0
return int(memory_usage)
# No binary even detected? Let's assume it exited, so we should begin doing the same.
self.agent_data[self.my_uuid]['STOP'] = True
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping'
return 0
def _getStack(self):
if self._darwin() == True:
stack_trace = LLDB()
else:
stack_trace = GDB()
tmp_pids = self._getPIDs()
if tmp_pids != {}:
last_pid = sorted([x for x in tmp_pids.keys()])[-1]
return stack_trace.getStackTrace(str(last_pid))
else:
return ''
def _getPIDs(self):
pid_list = {}
# Determine the binary to sample and store it. Doing the findCommand is a little expensive.
if self.track_process == '':
self.track_process = self._findCommand(''.join(self.arguments.run))
# A quick way to safely check for the availability of needed tools
self._verifyCommand(['ps'])
# If we are tracking a binary
if self.arguments.run:
command = [which('ps'), '-e', '-o', 'pid,rss,user,args']
tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
all_pids = tmp_proc.communicate()[0].split('\n')
# Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc)
for single_pid in all_pids:
if single_pid.find(self.track_process) != -1 and \
single_pid.find(__file__) == -1 and \
single_pid.find('mpirun') == -1 and \
single_pid.find(os.getenv('USER')) != -1 and \
single_pid.find('mpiexec') == -1:
pid_list[int(single_pid.split()[0])] = []
pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]])
return pid_list
def _verifyCommand(self, command_list):
for command in command_list:
if which(command) == None:
print 'Command not found:', command
sys.exit(1)
# determine if we are running on a darwin kernel
def _darwin(self):
if platform.platform(0, 1).split('-')[:-1][0].find('Darwin') != -1:
return True
# Determine the command we are going to track
# A few things are happening here; first we strip off any MPI commands
# we then loop through the remaining items until we find a matching path
# exp: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6
# would first strip off mpiexec, check for the presence of -n in our
# current directory, then 12, then ../../../moose_test-opt <- found. It would
# stop and return the base name (moose_test-opt).
def _findCommand(self, command):
if command.find('mpiexec') == 0 or command.find('mpirun') == 0:
for binary in command.split():
if os.path.exists(binary):
return os.path.split(binary)[1]
elif os.path.exists(command.split()[0]):
return os.path.split(command.split()[0])[1]
class GetTime:
"""A simple formatted time object.
"""
def __init__(self, posix_time=None):
import datetime
if posix_time == None:
self.posix_time = datetime.datetime.now()
else:
self.posix_time = datetime.datetime.fromtimestamp(posix_time)
self.now = float(datetime.datetime.now().strftime('%s.%f'))
self.microsecond = self.posix_time.microsecond
self.second = self.posix_time.second
self.minute = self.posix_time.strftime('%M')
self.hour = self.posix_time.strftime('%H')
self.day = self.posix_time.strftime('%d')
self.month = self.posix_time.strftime('%m')
self.year = self.posix_time.year
self.dayname = self.posix_time.strftime('%a')
self.monthname = self.posix_time.strftime('%b')
class MemoryPlotter:
def __init__(self, arguments):
self.arguments = arguments
self.buildGraph()
def buildPlots(self):
plot_dictionary = {}
for log in self.arguments.plot:
memory_list = []
if os.path.exists(log):
log_file = open(log, 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
for row in reader:
memory_list.append(row)
log_file.close()
plot_dictionary[log.split('/')[-1:][0]] = memory_list
else:
print 'log not found:', log
sys.exit(1)
return plot_dictionary
def buildGraph(self):
try:
import matplotlib.pyplot as plt
except ImportError:
print 'Error importing matplotlib. Matplotlib not available on this system?'
sys.exit(1)
plot_dictionary = self.buildPlots()
fig = plt.figure()
plot_list = []
tmp_plot = []
tmp_legend = []
self.stdout_msgs = {}
self.pstack_msgs = {}
self.multiples = 1
self.memory_label = 'Memory in Bytes'
# Try and calculate memory sizes, so we can move annotations around a bit more accurately
largest_memory = []
for plot_name, value_list in plot_dictionary.iteritems():
for records in value_list:
largest_memory.append(int(records[1]))
largest_memory.sort()
# Determine the scale of the graph
suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"]
multiplier = 1 << 40;
index = 0
while largest_memory[-1] < multiplier and multiplier >= 1:
multiplier = multiplier >> 10
index = index + 1
self.multiples = multiplier
self.memory_label = "Memory in " + suffixes[index-1]
# Loop through each log file
for plot_name, value_list in plot_dictionary.iteritems():
plot_list.append(fig.add_subplot(111))
tmp_memory = []
tmp_time = []
tmp_stdout_x = []
tmp_stdout_y = []
tmp_pstack_x = []
tmp_pstack_y = []
stdout_msg = []
pstack_msg = []
# Get the start time, and make this 0
try:
tmp_zero = decimal.Decimal(value_list[0][0])
except:
print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?'
sys.exit(1)
# Populate the graph
for records in value_list:
tmp_memory.append(decimal.Decimal(records[1]) / self.multiples)
tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero))
if len(records[2]) > 0 and self.arguments.stdout:
tmp_stdout_x.append(tmp_time[-1])
tmp_stdout_y.append(tmp_memory[-1])
stdout_msg.append(records[2])
if len(records[3]) > 0 and self.arguments.pstack:
tmp_pstack_x.append(tmp_time[-1])
tmp_pstack_y.append(tmp_memory[-1])
pstack_msg.append(records[3])
# Do the actual plotting:
f, = plot_list[-1].plot(tmp_time, tmp_memory)
tmp_plot.append(f)
tmp_legend.append(plot_name)
plot_list[-1].grid(True)
plot_list[-1].set_ylabel(self.memory_label)
plot_list[-1].set_xlabel('Time in Seconds')
# Plot annotations
if self.arguments.stdout:
stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color())
next_index = str(len(plot_list))
stdout_line.set_gid('stdout' + next_index)
self.stdout_msgs[next_index] = stdout_msg
self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color())
if self.arguments.pstack:
pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color())
next_index = str(len(plot_list))
pstack_line.set_gid('pstack' + next_index)
self.pstack_msgs[next_index] = pstack_msg
# Make points clickable
fig.canvas.mpl_connect('pick_event', self)
# Create legend
plt.legend(tmp_plot, tmp_legend, loc = 2)
plt.show()
def __call__(self, event):
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' }
line = event.artist
ind = event.ind
name = line.get_gid()[:-1]
index = line.get_gid()[-1]
if self.arguments.stdout and name == 'stdout':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "stdout -----------------------------------------------------\n"
for id in ind:
print self.stdout_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
if self.arguments.pstack and name == 'pstack':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "pstack -----------------------------------------------------\n"
for id in ind:
print self.pstack_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
def buildAnnotation(self,fig,x,y,msg,c):
for i in range(len(x)):
fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]),
xy=(x[i], y[i]),
rotation=self.arguments.rotate_text[-1],
xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])),
color=c, horizontalalignment='center', verticalalignment='bottom',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.5",
color=c
)
)
class ReadLog:
"""Read a memory_logger log file, and display the results to stdout in an easy to read form.
"""
def __init__(self, arguments):
self.arguments = arguments
history_file = open(self.arguments.read[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
self.memory_list = []
for row in reader:
self.memory_list.append(row)
history_file.close()
self.sorted_list = []
self.mem_list = []
self.use_nodes = False
self.printHistory()
def printHistory(self):
RESET = '\033[0m'
BOLD = '\033[1m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
CYAN = '\033[36m'
YELLOW = '\033[33m'
last_memory = 0.0
(terminal_width, terminal_height) = self.getTerminalSize()
for timestamp in self.memory_list:
to = GetTime(float(timestamp[0]))
total_memory = int(timestamp[1])
log = timestamp[2].split('\n')
pstack = timestamp[3].split('\n')
node_name = str(timestamp[4])
node_memory = int(timestamp[5])
self.mem_list.append(total_memory)
self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' + '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory])
largest_memory = decimal.Decimal(max(self.mem_list))
if len(set([x[4] for x in self.sorted_list])) > 1:
self.use_nodes = True
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
for item in self.sorted_list:
tmp_str = ''
if decimal.Decimal(item[1]) == largest_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width)
elif item[1] > last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width)
elif item[1] == last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width)
else:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width)
last_memory = item[1]
sys.stdout.write(tmp_str)
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width):
RESET = '\033[0m'
if decimal.Decimal(total_memory) == largest_memory:
percent = '100'
elif (decimal.Decimal(total_memory) / largest_memory) == 0:
percent = '0'
else:
percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6]
header = len(date) + 18
footer = len(percent) + 6
additional_correction = 0
max_length = decimal.Decimal(terminal_width - header) / largest_memory
total_position = total_memory * decimal.Decimal(max_length)
node_position = node_memory * decimal.Decimal(max_length)
tmp_log = ''
if self.arguments.stdout:
for single_log in log:
if single_log != '':
tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n'
if self.arguments.pstack:
for single_pstack in pstack:
if single_pstack != '':
tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n'
if self.arguments.separate and self.use_nodes != False:
message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >'
additional_correction = len(RESET) + len(color_code)
elif self.use_nodes:
message = '< >'
else:
node_position = 0
message = ''
return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log
def getTerminalSize(self):
"""Quicky to get terminal window size"""
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
# A simple `which` function to return the full path to a program
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
print 'I could not find the following binary:', program
sys.exit(1)
def verifyArgs(args):
option_count = 0
if args.read:
option_count += 1
if args.run:
option_count += 1
if args.plot:
option_count += 1
if option_count != 1 and args.pbs != True:
if args.call_back_host == None:
print 'You must use one of the following: run, read, or plot'
sys.exit(1)
args.cwd = os.getcwd()
# Work with --recover (a MOOSE application specific option)
args.recover = False
if args.run:
if args.run[0].find('--recover') != -1:
args.recover = True
if args.outfile == None and args.run:
# Attempt to build the output file based on input file
if re.findall(r'-i (\w+)', args.run[0]) != []:
args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log']
else:
args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log']
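# Example (editor's note, hypothetical command): for
#   --run "mpiexec -n 4 ./app-opt -i simple_diffusion.i"
# the regex above picks up 'simple_diffusion', so the default log becomes
# <cwd>/simple_diffusion_memory.log; commands without '-i <file>' fall back to a
# name derived from the command string itself.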
if args.pstack:
if platform.platform(0, 1).split('-')[:-1][0].find('Darwin') != -1:
try:
import lldb
except ImportError:
print 'Unable to import lldb. The lldb API is now supplied by \nXcode but not automatically set in your PYTHONPATH. \nPlease search the internet for how to do this if you \nwish to use --pstack on Mac OS X.\n\nNote: If you installed Xcode to the default location of \n/Applications, you should only have to perform the following:\n\n\texport PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python:$PYTHONPATH\n'
sys.exit(1)
else:
results = which('gdb')
return args
def parseArguments(args=None):
parser = argparse.ArgumentParser(description='Track and Display memory usage')
rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage')
rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command. You must encapsulate the command in quotes\n ')
rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ')
rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay as to when the memory_logger will launch the tracking agents\n ')
rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)')
rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ')
rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. (Defaults based on run command)\n ')
readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger')
readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ')
readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ')
readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ')
commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results')
commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ')
commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ')
plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot')
plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this amount (default 30)\n ')
plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this amount (default 0 0)\n ')
plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ')
plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ')
internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.')
internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ')
return verifyArgs(parser.parse_args(args))
if __name__ == '__main__':
args = parseArguments()
if args.read:
ReadLog(args)
sys.exit(0)
if args.plot:
MemoryPlotter(args)
sys.exit(0)
Server(args)
|
lgpl-2.1
|
phaustin/pyman
|
Book/chap8/sources/FitA.py
|
3
|
11175
|
"""Class for fitting data to y = a + bx"""
import numpy as np
import sys
class FitLin:
def __init__(self, x, y, dy=1.0):
if isinstance(x, np.ndarray)==False:
x = np.array(x)
if isinstance(y, np.ndarray)==False:
y = np.array(y)
if isinstance(dy, np.ndarray)==False:
dy = np.array(dy)
if x.size != y.size:
print("ArrayDiffError: Data arrays x & y must have the same size")
sys.exit()
if x.size <= 2:
print("ArraySizeError: Data arrays must have more than 2 elements")
sys.exit()
if dy.size != x.size: # no weighting if dy array not supplied
self.dy = np.ones(x.size)
self.weighting = False
print("NoWeightsWarning: Using uniform unweighted fitting")
else: # use dy for weighting data if supplied
self.dy = dy
self.weighting = True
self.x = x
self.y = y
var = self.dy*self.dy
self.s = (1./var).sum()
self.sx = (x/var).sum()
self.sy = (y/var).sum()
self.sxx = (x*x/var).sum()
self.sxy = (x*y/var).sum()
self.norm = self.s*self.sxx - self.sx*self.sx
self.yint = (self.sxx*self.sy-self.sx*self.sxy)/self.norm
self.slope = (self.s*self.sxy - self.sx*self.sy)/self.norm
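# These are the standard closed-form weighted least-squares results: with weights
# w_i = 1/dy_i**2 and Delta = S*Sxx - Sx**2,
#   intercept a = (Sxx*Sy - Sx*Sxy) / Delta
#   slope     b = (S*Sxy  - Sx*Sy ) / Delta
# and (see DeltaFitParams/Cov below) var(a) = Sxx/Delta, var(b) = S/Delta,
# cov(a, b) = -Sx/Delta.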
def LinearFit(self):
return self.yint, self.slope
def DeltaFitParams(self):
if self.weighting == True:
delta_yint = np.sqrt(self.sxx / self.norm)
delta_slope = np.sqrt(self.s / self.norm)
else:
delta_yint = np.float('NaN')
delta_slope = np.float('nan')
return delta_yint, delta_slope
def Cov(self):
return -self.sx/self.norm
def Residuals(self):
try:
return self.residuals
except AttributeError:
self.residuals = self.y - (self.yint + self.slope*self.x)
return self.residuals
def RedChiSqr(self):
try:
chisq = ((self.residuals/self.dy)**2).sum()
except AttributeError:
self.residuals = self.y - (self.yint + self.slope*self.x)
chisq = ((self.residuals/self.dy)**2).sum()
return chisq/float(self.x.size-2)
def fitlin(x, y, dy=1.0):
"""
fitlin(x, y, dy=1.0)
Use linear least squares method to fit linear function f=a+bx to data
Parameters
----------
x : one dimensional array of x data with n>2 data points
y : one dimensional array of y data with n>2 data points
dy : one dimensional array of uncertainties (errors) in y data
Set equal to 1 for all data points if not specified
Returns
-------
fit : (a,b) tuple of the best fit model parameters a (the y-intercept)
and b (the slope) for the input data arrays x, y
unc : (da,db) tuple of the estimated uncertainties (square root of the
variance) of the best fit model parameters a and b.
redchisq : reduced value of chi-squared goodness of fit parameter
chisq = sum_i^n [y_i-(a+b*x_i)]^2/dy_i^2. redchisq = chisq/(n-2) where
n is the number of data points in x or y.
cov : covariance of the fitting parameters a and b.
residuals : ndarray of length n of the differences y-f between y-data and
the fitted data f
Raises
------
ArrayDiffError : If input arrays x and y have different sizes
ArraySizeError : If input arrays have 2 or fewer elements
NoWeightsWarning : If dy is left as an unspecified scalar equal to 1.0
or if it is an array not equal in size to x and y
Examples
--------
Fit a line, "y = a + bx", through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
>>> dy = np.array([0.18, 0.13, 0.15, 0.17])
Fitting the x and y data without the dy array of uncertainties in y data
>>> fit, dfit, redchisq, cov, residuals = fitlin(x, y)
>>> print("a = {0:0.2f}\nb = {1:0.2f}".format(fit[0], fit[1]))
a = -0.95
b = 1.00
No estimate of the uncertainties in the fitted values of a and b is
possible without supplying the dy array of uncertainties, so fitlin returns
"nan" (not a number) for the uncertainties da and db.
>>> print("da = {0:0.2f}\ndb = {1:0.2f}".format(dfit[0], dfit[1]))
da = nan
db = nan
When no estimate of the uncertainties is supplied, dy=1.0 and
np.sqrt(redchisq) gives the standard deviation of the y-data from the
fitted values of the y-data.
>>> print("Estimated uncertainties = {0:0.2f}".format(np.sqrt(redchisq)))
Estimated uncertainties = 0.16
The residuals give the differences between the y-data and the fitted values
of the y-data
>>> print(residuals)
array([-0.05 0.15 -0.15 0.05])
--------
Fitting the x and y data _with_ the dy array of uncertainties in y data
>>> fit, dfit, redchisq, cov, residuals = fitlin(x, y, dy)
>>> print("a = {0:0.2f}\nb = {1:0.2f}".format(fit[0], fit[1]))
a = -0.91
b = 0.98
Estimates of the uncertainties in the fitted values of a and b given by da
and db when the dy array of uncertainties is supplied.
>>> print("da = {0:0.2f}\ndb = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.14
db = 0.08
When the array of uncertainties dy is supplied, redchisq gives the reduced
value of the goodness-of-fit parameter chi-squared. A redchisq value near
1 indicates the fit is believable, provided the dy estimates are valid.
>>> print(redchisq)
1.20731129496665
The covariance divided by the product of the uncertainties of the fitted
parameters gives the coefficient of correlation r, a number between -1 and 1
that tells whether errors in the fitting parameters a and b are likely to
have the same sign (r>0) or opposite signs (r<0)
>>> print(cov/(dfit[0]*dfit[1]))
-0.827152541117
The residuals give the differences between the y-data and the fitted values
of the y-data
>>> print(residuals)
array([-0.08856653, 0.12781099, -0.1558115 , 0.06056602])
>>> import matplotlib.pyplot as plt
>>> plt.errorbar(x, y, yerr=dy, fmt='o', label='data')
>>> plt.plot(x, fit[0] + fit[1]*x, 'r', label='Fitted line')
>>> plt.legend(loc="best")
>>> plt.show()
"""
Fit = FitLin(x, y, dy)
fit = Fit.LinearFit()
unc = Fit.DeltaFitParams()
redchisq = Fit.RedChiSqr()
cov = Fit.Cov()
resids = Fit.Residuals()
return fit, unc, redchisq, cov, resids
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec # for unequal plot boxes
# data set for linear fitting
x = np.array([2.3, 4.7, 7.1, 9.6, 11.7, 14.1, 16.4, 18.8, 21.1, 23.0])
y = np.array([-25., 3., 114., 110., 234., 304., 271., 322., 446., 397.])
dy = np.array([15., 30., 34., 37., 40., 50., 38., 28., 47., 30.])
# Fit linear data set without weighting and print out results
print("\nFit without weighting (no uncertainty estimates)")
fit, dfit, redchisq, cov, residuals = fitlin(x, y)
print(u"y-intercept = {0:0.1f} \xb1 {1:0.1f}".format(fit[0], dfit[0]))
print(u"slope = {0:0.1f} \xb1 {1:0.1f}".format(fit[1], dfit[1]))
print("reduced chi-squared = {0:0.2f}".format(redchisq))
# Fit linear data set with weighting and print out results
print("\nFit with weighting (with uncertainty estimates)")
fit, dfit, redchisq, cov, residuals = fitlin(x, y, dy)
print(u"y-intercept = {0:0.1f} \xb1 {1:0.1f}".format(fit[0], dfit[0]))
print(u"slope = {0:0.1f} \xb1 {1:0.1f}".format(fit[1], dfit[1]))
print("reduced chi-squared = {0:0.2f}".format(redchisq))
# Open figure window for plotting data with linear fit
fig1 = plt.figure(1, figsize=(8,8))
gs = gridspec.GridSpec(2, 1, height_ratios=[2.5, 6])
# Bottom plot: data and fit
ax1 = fig1.add_subplot(gs[1])
# Plot data with error bars on top of fit
ax1.errorbar(x, y, yerr = dy, ecolor="black", fmt="ro", ms=4)
# Plot fit (behind data)
endt = 0.05 * (x.max()-x.min())
tFit = np.array([x.min()-endt, x.max()+endt])
vFit = fit[0] + fit[1]*tFit
ax1.plot(tFit, vFit, '-b', zorder=-1)
# Print out results of fit on plot
ax1.text(0.05, 0.9,
u"y-intercept = {0:0.1f} \xb1 {1:0.1f}".format(fit[0], dfit[0]),
ha='left', va='center', transform = ax1.transAxes)
ax1.text(0.05, 0.83,
u"slope = {0:0.1f} \xb1 {1:0.1f}".format(fit[1], dfit[1]),
ha='left', va='center', transform = ax1.transAxes)
ax1.text(0.05, 0.76,
"redchisq = {0:0.2f}".format(redchisq),
ha='left', va='center', transform = ax1.transAxes)
ax1.text(0.05, 0.69,
"r = {0:0.2f}".format(cov/(dfit[0]*dfit[1])),
ha='left', va='center', transform = ax1.transAxes)
# Label axes
ax1.set_xlabel('time')
ax1.set_ylabel('velocity')
# Top plot: residuals
ax2 = fig1.add_subplot(gs[0])
ax2.axhline(color="gray")
ax2.errorbar(x, residuals, yerr = dy, ecolor="black", fmt="ro", ms=4)
ax2.set_ylabel('residuals')
ax2.set_ylim(-100, 150)
ax2.set_yticks((-100, 0, 100))
plt.show()
# data set for linear fitting using lists instead of NumPy arrays
print("\n\nTesting with data lists instead of data arrays")
x = [2.3, 4.7, 7.1, 9.6, 11.7, 14.1, 16.4, 18.8, 21.1, 23.0]
y = [-25., 3., 114., 110., 234., 304., 271., 322., 446., 397.]
dy = [15., 30., 34., 37., 40., 50., 38., 28., 47., 30.]
# Fit linear data set without weighting and print out results
print("\nFit without weighting (no uncertainty estimates)")
fit, dfit, redchisq, cov, residuals = fitlin(x, y)
print(u"y-intercept = {0:0.1f} \xb1 {1:0.1f}".format(fit[0], dfit[0]))
print(u"slope = {0:0.1f} \xb1 {1:0.1f}".format(fit[1], dfit[1]))
print("reduced chi-squared = {0:0.2f}".format(redchisq))
# Fit linear data set with weighting and print out results
print("\nFit with weighting (with uncertainty estimates)")
fit, dfit, redchisq, cov, residuals = fitlin(x, y, dy)
print(u"y-intercept = {0:0.1f} \xb1 {1:0.1f}".format(fit[0], dfit[0]))
print(u"slope = {0:0.1f} \xb1 {1:0.1f}".format(fit[1], dfit[1]))
print("reduced chi-squared = {0:0.2f}".format(redchisq))
# data set for linear fitting using lists instead of NumPy arrays
print("\n\nTesting unequally sized data sets")
x = [2.3, 4.7, 7.1, 9.6, 11.7, 14.1, 16.4, 18.8, 21.1]
y = [-25., 3., 114., 110., 234., 304., 271., 322., 446., 397.]
dy = [15., 30., 34., 37., 40., 50., 38., 28., 47., 30.]
fit, dfit, redchisq, cov, residuals = fitlin(x, y, dy)
# data sets with only 2 elements
print("\n\nTesting too small data sets")
x = [2.3, 4.7]
y = [-25., 3.]
dy = [15., 30.]
fit, dfit, redchisq, cov, residuals = fitlin(x, y, dy)
|
cc0-1.0
|
heli522/scikit-learn
|
examples/mixture/plot_gmm_selection.py
|
248
|
3223
|
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
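# For each candidate model, gmm.bic(X) evaluates BIC = -2 * log-likelihood(X)
# + n_parameters * ln(n_samples); lower is better, so the fit with the smallest
# BIC across all covariance types and component counts is kept.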
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
|
bsd-3-clause
|
mshakya/PyPiReT
|
piret/checks/dependencies.py
|
1
|
1596
|
#! /usr/bin/env python
"""Check design."""
from __future__ import print_function
import sys
from plumbum import local
class CheckDependencies():
"""Check if third party dependencies are in the path."""
def __init__(self, package):
"""Initialize."""
self.package = package
# self.logger = logger
def check_thirdparty(self):
"""Check if thirdparty tool is in the path."""
try:
local[self.package]
print('%s package exists' % self.package)
# self.logger.info('%s package exists' % self.package)
return True
except:
exit_msg = """%s is not installed or not in your PATH!
Please install.
See README for instructions on how to install it."""
sys.exit(exit_msg % self.package)
def check_depen(tool_list):
"""A function that checks the list of tool are in path or not."""
for tool in tool_list:
check = CheckDependencies(package=tool)
check.check_thirdparty()
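# A hedged usage sketch (the tool names below are purely illustrative, not a list the
# package actually requires):
#   check_depen(['samtools', 'bowtie2'])   # exits with a message if either is missing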
def check_pydepen():
"""A function that checks the list of tool are in path or not."""
try:
import luigi
import plumbum
import pandas
import Bio
import gffutils
import os
import sys
import argparse
import logging
print("All required python package exist")
except:
exit_msg = """python package luigi is not installed! \n Please install
it using python setup.py install and try again"""
sys.exit(exit_msg)
|
bsd-3-clause
|
PrashntS/scikit-learn
|
sklearn/metrics/classification.py
|
95
|
67713
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
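# coo_matrix sums duplicated (row, col) entries when it is converted to a dense
# array, so stacking a 1 for every (true, pred) pair below yields the per-cell
# counts of the confusion matrix.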
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
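    Examples
    --------
    A minimal illustrative check (values chosen for demonstration only):
    complete agreement between the two annotators gives a kappa of 1.
    >>> from sklearn.metrics import cohen_kappa_score
    >>> cohen_kappa_score([0, 1, 1, 0, 1], [0, 1, 1, 0, 1])
    1.0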
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
        over the sample set. Otherwise, return the average of the Jaccard
        similarity coefficients.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
    If ``normalize`` is ``True``, return the fraction of misclassifications
    (float); otherwise, return the number of misclassifications (int). The best
    performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
        Return the average Hamming loss between elements of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
| robjstan/modelling-course | notebooks/python/f02.py | 1 | 2889 |
# setup ipython environment
from ipywidgets import interact, fixed
# setup python environment
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_expm(r:(0,4,0.1)=1.1, n0=fixed(100)):
nt = [r**t * n0 for t in range(10)]
plt.figure(figsize=[9,4])
plt.plot(nt, lw=2)
plt.xlabel('generation number')
plt.ylabel('population size')
plt.ylim(0, max(nt)*1.1)
plt.show()
def logistic_map(xt, r):
return r*xt*(1-xt)
def gen_lm(xt, r, tmax=1):
for t in range(tmax+1):
yield xt
xt = logistic_map(xt, r)
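# Illustrative check (input values assumed for demonstration): x = 0.5 is a
# fixed point of the logistic map for r = 2, so the generated sequence stays
# constant:
#   list(gen_lm(0.5, 2.0, 3))  ->  [0.5, 0.5, 0.5, 0.5]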
def plot_lm(r:(0,2,0.1)=1.1):
plt.figure(figsize=[9,4])
for ic in np.linspace(0.1, 0.9, 5):
plt.plot(list(gen_lm(ic, r, 20)), lw=2)
plt.xlabel('generation number')
plt.ylabel('population size')
plt.ylim(0,1)
plt.show()
def plot_lm_web(r:(0,2,0.05)=1.1, ic:(0,1,0.01)=0.5, n_iter:(1,100)=1):
plt.figure(figsize=[8,7])
plt.plot([0,1], [0,1], 'k') # x=y line
# logistic map
x_pts = np.linspace(0,1,30)
plt.plot(x_pts, logistic_map(x_pts, r), lw=2)
# plot 'web'
lm_pts = [v for v in gen_lm(ic, r, n_iter) for _ in (0, 1)]
plt.plot(lm_pts[:-2], [0]+lm_pts[2:-1], lw=1)
plt.xlabel('x_t')
plt.ylabel('x_{t+1}')
plt.xlim(0,1)
plt.ylim(0,1)
plt.show()
def plot_lm_bif1():
plt.figure(figsize=[9,4])
c = plt.rcParams['axes.color_cycle']
r_pts1 = np.linspace(0.001, 1, 30)
r_pts2 = np.linspace(1, 3, 30)
plt.plot(r_pts1, [0]*len(r_pts1), ls='-', lw=2, color=c[2])
plt.plot(r_pts2, [0]*len(r_pts2), ls='--', lw=2, color=c[2])
plt.plot(r_pts1, 1-1/r_pts1, ls='--', lw=2, color=c[5])
plt.plot(r_pts2, 1-1/r_pts2, ls='-', lw=2, color=c[5])
plt.xlabel('$r$')
plt.ylabel('$x*$')
plt.ylim(-1,1)
plt.show()
def plot_lm_bif2():
def lm_ss(x0, r):
gen = gen_lm(0.1, r, 1000)
[next(gen) for _ in range(800)] #ignore first 800 pts
return list(set([round(pt,3) for pt in gen]))
def lm_unique_points_to_plot(r_pts):
for r in r_pts:
for pt in lm_ss(0.1, r):
yield (r, pt)
r_pts = np.linspace(2.8,4,1000)
lm_pts = list(zip(*lm_unique_points_to_plot(r_pts)))
plt.figure(figsize=[9,5])
plt.plot(lm_pts[0], lm_pts[1], ',')
plt.xlabel('$r$')
plt.xlim(2.8,4)
plt.ylabel('$x*$')
plt.show()
# plant model
def gen_pm(a, b, g, s, p0, p1, tmax=2):
p = [p0, p1]
yield p0; yield p1
for t in range(tmax-1):
pn = a*s*g*p[1] + b*s**2*(1-a)*g*p[0]
yield pn
p = [p[1], pn]
def plot_pm(a:(0,1,0.05)=1, b:(0,1,0.05)=1, g:(0,2,0.1)=1, s:(0,1,0.05)=1):
plt.figure(figsize=[8,7])
pts = list(gen_pm(a, b, g, s, 0, 10, 20))
plt.plot(pts, lw=2)
plt.xlabel('generation number')
plt.ylabel('population size')
plt.ylim(0, max(pts)*1.1)
plt.show()
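# The plotting helpers above carry ipywidgets-style range annotations; a
# typical notebook usage (assumed, not executed in this script) would be:
#   interact(plot_expm)     # slider for r; n0 is held fixed at 100
#   interact(plot_lm_web)   # sliders for r, ic and n_iter
#   interact(plot_pm)       # sliders for a, b, g and s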
| mit |
| ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/examples/learn/text_classification_character_cnn.py | 18 | 4289 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is an alternative to the Lua code available here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
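# Typical invocations (assumed, based on the flag defined above):
#   python text_classification_character_cnn.py
#   python text_classification_character_cnn.py --test_with_fake_data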
| apache-2.0 |
| rohit21122012/DCASE2013 | runs/2016/baseline64/src/dataset.py | 37 | 78389 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib2
import socket
import locale
import zipfile
import tarfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
    Specific dataset classes inherit from this class and reimplement only the methods they need.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
        # Create the dataset path if it does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
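    # Example meta.txt rows for the three tab-separated formats parsed above
    # (file names and values are hypothetical, columns separated by tabs):
    #   audio/a001.wav  street
    #   audio/a002.wav  home    cm      child speech;adult male speech
    #   audio/a003.wav  office  1.25    3.80    speech  m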
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
        if num_bytes > YiB:
            output += ' (%.4g YiB)' % (num_bytes / float(YiB))
        elif num_bytes > ZiB:
            output += ' (%.4g ZiB)' % (num_bytes / float(ZiB))
        elif num_bytes > EiB:
            output += ' (%.4g EiB)' % (num_bytes / float(EiB))
        elif num_bytes > PiB:
            output += ' (%.4g PiB)' % (num_bytes / float(PiB))
        elif num_bytes > TiB:
            output += ' (%.4g TiB)' % (num_bytes / float(TiB))
        elif num_bytes > GiB:
            output += ' (%.4g GiB)' % (num_bytes / float(GiB))
        elif num_bytes > MiB:
            output += ' (%.4g MiB)' % (num_bytes / float(MiB))
        elif num_bytes > KiB:
            output += ' (%.4g KiB)' % (num_bytes / float(KiB))
return output
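    # Example (a sketch; the thousands grouping depends on the active locale):
    #   Dataset.print_bytes(123456789) -> '123,456,789 bytes (117.7 MiB)'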
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
            except (urllib2.URLError, socket.timeout), e:
                try:
                    fo.close()
                except:
                    pass
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
                            if len(prefix) > 1:
                                prefix = prefix[:1]
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
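    # Typical workflow sketch (uses one of the concrete subclasses defined
    # further below, e.g. TUTAcousticScenes_2016_DevelopmentSet):
    #
    #   dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #   for fold in dataset.folds(mode='folds'):
    #       train_items = dataset.train(fold)   # list of meta dicts for training
    #       test_items = dataset.test(fold)     # list of {'file': ...} dicts for testing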
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
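    # Round-trip sketch for the two path helpers above (hypothetical file name):
    #   rel = os.path.join('audio', 'a001.wav')
    #   abs_path = dataset.relative_to_absolute_path(rel)   # <local_path>/audio/a001.wav
    #   dataset.absolute_to_relative(abs_path) == rel       # True for paths under local_path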
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
            for fold in xrange(1, self.evaluation_folds + 1):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                        if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
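    # e.g. tagcode_to_taglabel('c') -> 'child speech'; unknown codes return None.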
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                            if tag not in ('S', 'U'):
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
print self.evaluation_setup_path
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
        if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
            # polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
            # polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
|
mit
|
IshankGulati/scikit-learn
|
examples/linear_model/plot_ransac.py
|
103
|
1797
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
lr = linear_model.LinearRegression()
lr.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
ransac = linear_model.RANSACRegressor()
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(X.min(), X.max())[:, np.newaxis]
line_y = lr.predict(line_X)
line_y_ransac = ransac.predict(line_X)
# Compare estimated coefficients
print("Estimated coefficients (true, linear regression, RANSAC):")
print(coef, lr.coef_, ransac.estimator_.coef_)
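# With the synthetic outliers added above, ordinary least squares is pulled
# away from the true slope, while the RANSAC estimate (refit on the detected
# inliers only) stays close to the true `coef`.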
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linewidth=lw, label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,
label='RANSAC regressor')
plt.legend(loc='lower right')
plt.xlabel("Input")
plt.ylabel("Response")
plt.show()
|
bsd-3-clause
|
shiquanwang/caffe
|
python/detect.py
|
25
|
5026
|
#!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/imagenet_deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/caffe_reference_imagenet_model"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="center_only",
choices=CROP_MODES,
help="Image crop mode"
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
default=255,
help="Multiply input features by this scale before input to net"
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
args = parser.parse_args()
channel_swap = [int(s) for s in args.channel_swap.split(',')]
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model,
gpu=args.gpu, mean_file=args.mean_file,
input_scale=args.input_scale, channel_swap=channel_swap)
if args.gpu:
        print('GPU mode')
# Load input.
t = time.time()
print('Loading input...')
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = (
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
)
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities, one column per output class.
        # NUM_OUTPUT is not defined anywhere in this script, so derive the
        # number of classes from the width of the detector's feature vectors.
        feats = np.vstack(df['feat'])
        num_output = feats.shape[1]
        class_cols = ['class{}'.format(x) for x in range(num_output)]
        df[class_cols] = pd.DataFrame(
            data=feats, index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
|
bsd-2-clause
|
pratapvardhan/pandas
|
pandas/tests/tseries/test_frequencies.py
|
5
|
30953
|
from datetime import datetime, timedelta
from pandas.compat import range
import pytest
import numpy as np
from pandas import (Index, DatetimeIndex, Timestamp, Series,
date_range, period_range)
from pandas._libs.tslibs.frequencies import (_period_code_map,
INVALID_FREQ_ERR_MSG)
from pandas._libs.tslibs.ccalendar import MONTHS
from pandas._libs.tslibs import resolution
import pandas.tseries.frequencies as frequencies
from pandas.core.tools.datetimes import to_datetime
import pandas.tseries.offsets as offsets
from pandas.core.indexes.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
class TestToOffset(object):
def test_to_offset_multiple(self):
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert (result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert (result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert (result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert (result == expected)
freqstr = '2h 20.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(8430)
assert (result == expected)
freqstr = '1.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(90)
assert (result == expected)
freqstr = '0.5S'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(500)
assert (result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert (result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert (result == expected)
freqstr = '1s0.25ms'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '1s0.25L'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert (result == expected)
freqstr = '2SM'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2)
assert (result == expected)
freqstr = '2SM-16'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2, day_of_month=16)
assert (result == expected)
freqstr = '2SMS-14'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2, day_of_month=14)
assert (result == expected)
freqstr = '2SMS-15'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2)
assert (result == expected)
# malformed
with tm.assert_raises_regex(ValueError,
'Invalid frequency: 2h20m'):
frequencies.to_offset('2h20m')
def test_to_offset_negative(self):
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert (result.n == -310)
freqstr = '-2SM'
result = frequencies.to_offset(freqstr)
assert (result.n == -2)
freqstr = '-1SMS'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
def test_to_offset_invalid(self):
# GH 13930
with tm.assert_raises_regex(ValueError,
'Invalid frequency: U1'):
frequencies.to_offset('U1')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: -U'):
frequencies.to_offset('-U')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: 3U1'):
frequencies.to_offset('3U1')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: -2-3U'):
frequencies.to_offset('-2-3U')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: -2D:3H'):
frequencies.to_offset('-2D:3H')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: 1.5.0S'):
frequencies.to_offset('1.5.0S')
# split offsets with spaces are valid
assert frequencies.to_offset('2D 3H') == offsets.Hour(51)
assert frequencies.to_offset('2 D3 H') == offsets.Hour(51)
assert frequencies.to_offset('2 D 3 H') == offsets.Hour(51)
assert frequencies.to_offset(' 2 D 3 H ') == offsets.Hour(51)
assert frequencies.to_offset(' H ') == offsets.Hour()
assert frequencies.to_offset(' 3 H ') == offsets.Hour(3)
# special cases
assert frequencies.to_offset('2SMS-15') == offsets.SemiMonthBegin(2)
with tm.assert_raises_regex(ValueError,
'Invalid frequency: 2SMS-15-15'):
frequencies.to_offset('2SMS-15-15')
with tm.assert_raises_regex(ValueError,
'Invalid frequency: 2SMS-15D'):
frequencies.to_offset('2SMS-15D')
def test_to_offset_leading_zero(self):
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert (result.n == -194)
def test_to_offset_leading_plus(self):
freqstr = '+1d'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '+2h30min'
result = frequencies.to_offset(freqstr)
assert (result.n == 150)
for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']:
with tm.assert_raises_regex(ValueError, 'Invalid frequency:'):
frequencies.to_offset(bad_freq)
def test_to_offset_pd_timedelta(self):
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert (expected == result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert (expected == result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert (expected == result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert (expected == result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert (expected == result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert (result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert (expected == result)
td = Timedelta(microseconds=0)
pytest.raises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts(self):
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert (result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert (result1 == expected)
assert (result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert (result1 == expected)
result1 = frequencies.to_offset('SM')
result2 = frequencies.to_offset('SM-15')
expected = offsets.SemiMonthEnd(day_of_month=15)
assert (result1 == expected)
assert (result2 == expected)
result = frequencies.to_offset('SM-1')
expected = offsets.SemiMonthEnd(day_of_month=1)
assert (result == expected)
result = frequencies.to_offset('SM-27')
expected = offsets.SemiMonthEnd(day_of_month=27)
assert (result == expected)
result = frequencies.to_offset('SMS-2')
expected = offsets.SemiMonthBegin(day_of_month=2)
assert (result == expected)
result = frequencies.to_offset('SMS-27')
expected = offsets.SemiMonthBegin(day_of_month=27)
assert (result == expected)
# ensure invalid cases fail as expected
invalid_anchors = ['SM-0', 'SM-28', 'SM-29',
'SM-FOO', 'BSM', 'SM--1',
'SMS-1', 'SMS-28', 'SMS-30',
                           'SMS-BAR', 'SMS-BYR', 'BSMS',
'SMS--2']
for invalid_anchor in invalid_anchors:
with tm.assert_raises_regex(ValueError,
'Invalid frequency: '):
frequencies.to_offset(invalid_anchor)
def test_ms_vs_MS():
left = frequencies.get_offset('ms')
right = frequencies.get_offset('MS')
assert left == offsets.Milli()
assert right == offsets.MonthBegin()
def test_rule_aliases():
rule = frequencies.to_offset('10us')
assert rule == offsets.Micro(10)
class TestFrequencyCode(object):
def test_freq_code(self):
assert frequencies.get_freq('A') == 1000
assert frequencies.get_freq('3A') == 1000
assert frequencies.get_freq('-1A') == 1000
assert frequencies.get_freq('Y') == 1000
assert frequencies.get_freq('3Y') == 1000
assert frequencies.get_freq('-1Y') == 1000
assert frequencies.get_freq('W') == 4000
assert frequencies.get_freq('W-MON') == 4001
assert frequencies.get_freq('W-FRI') == 4005
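        # Every registered period code should round-trip through get_freq, and
        # its frequency group is the code rounded down to the nearest 1000.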
for freqstr, code in compat.iteritems(_period_code_map):
result = frequencies.get_freq(freqstr)
assert result == code
result = resolution.get_freq_group(freqstr)
assert result == code // 1000 * 1000
result = resolution.get_freq_group(code)
assert result == code // 1000 * 1000
def test_freq_group(self):
assert resolution.get_freq_group('A') == 1000
assert resolution.get_freq_group('3A') == 1000
assert resolution.get_freq_group('-1A') == 1000
assert resolution.get_freq_group('A-JAN') == 1000
assert resolution.get_freq_group('A-MAY') == 1000
assert resolution.get_freq_group('Y') == 1000
assert resolution.get_freq_group('3Y') == 1000
assert resolution.get_freq_group('-1Y') == 1000
assert resolution.get_freq_group('Y-JAN') == 1000
assert resolution.get_freq_group('Y-MAY') == 1000
assert resolution.get_freq_group(offsets.YearEnd()) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=1)) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=5)) == 1000
assert resolution.get_freq_group('W') == 4000
assert resolution.get_freq_group('W-MON') == 4000
assert resolution.get_freq_group('W-FRI') == 4000
assert resolution.get_freq_group(offsets.Week()) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=1)) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
assert (tsb(frequencies.get_freq_code('D')[0]) ==
frequencies.get_freq_code('D')[0])
assert (tsb(frequencies.get_freq_code('W')[0]) ==
frequencies.get_freq_code('D')[0])
assert (tsb(frequencies.get_freq_code('M')[0]) ==
frequencies.get_freq_code('D')[0])
assert (tsb(frequencies.get_freq_code('S')[0]) ==
frequencies.get_freq_code('S')[0])
assert (tsb(frequencies.get_freq_code('T')[0]) ==
frequencies.get_freq_code('S')[0])
assert (tsb(frequencies.get_freq_code('H')[0]) ==
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
assert Reso.get_str_from_freq('A') == 'year'
assert Reso.get_str_from_freq('Q') == 'quarter'
assert Reso.get_str_from_freq('M') == 'month'
assert Reso.get_str_from_freq('D') == 'day'
assert Reso.get_str_from_freq('H') == 'hour'
assert Reso.get_str_from_freq('T') == 'minute'
assert Reso.get_str_from_freq('S') == 'second'
assert Reso.get_str_from_freq('L') == 'millisecond'
assert Reso.get_str_from_freq('U') == 'microsecond'
assert Reso.get_str_from_freq('N') == 'nanosecond'
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
assert freq == result
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
assert freq == result
def test_resolution_bumping(self):
# see gh-14378
Reso = frequencies.Resolution
assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S')
assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S')
assert Reso.get_stride_from_decimal(1.04, 'H') == (3744, 'S')
assert Reso.get_stride_from_decimal(1, 'D') == (1, 'D')
assert (Reso.get_stride_from_decimal(0.342931, 'H') ==
(1234551600, 'U'))
assert Reso.get_stride_from_decimal(1.2345, 'D') == (106660800, 'L')
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.5, 'N')
        # too much precision in the input can prevent an exact conversion
        # and should raise
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H')
def test_get_freq_code(self):
# frequency str
assert (frequencies.get_freq_code('A') ==
(frequencies.get_freq('A'), 1))
assert (frequencies.get_freq_code('3D') ==
(frequencies.get_freq('D'), 3))
assert (frequencies.get_freq_code('-2M') ==
(frequencies.get_freq('M'), -2))
# tuple
assert (frequencies.get_freq_code(('D', 1)) ==
(frequencies.get_freq('D'), 1))
assert (frequencies.get_freq_code(('A', 3)) ==
(frequencies.get_freq('A'), 3))
assert (frequencies.get_freq_code(('M', -2)) ==
(frequencies.get_freq('M'), -2))
# numeric tuple
assert frequencies.get_freq_code((1000, 1)) == (1000, 1)
# offsets
assert (frequencies.get_freq_code(offsets.Day()) ==
(frequencies.get_freq('D'), 1))
assert (frequencies.get_freq_code(offsets.Day(3)) ==
(frequencies.get_freq('D'), 3))
assert (frequencies.get_freq_code(offsets.Day(-2)) ==
(frequencies.get_freq('D'), -2))
assert (frequencies.get_freq_code(offsets.MonthEnd()) ==
(frequencies.get_freq('M'), 1))
assert (frequencies.get_freq_code(offsets.MonthEnd(3)) ==
(frequencies.get_freq('M'), 3))
assert (frequencies.get_freq_code(offsets.MonthEnd(-2)) ==
(frequencies.get_freq('M'), -2))
assert (frequencies.get_freq_code(offsets.Week()) ==
(frequencies.get_freq('W'), 1))
assert (frequencies.get_freq_code(offsets.Week(3)) ==
(frequencies.get_freq('W'), 3))
assert (frequencies.get_freq_code(offsets.Week(-2)) ==
(frequencies.get_freq('W'), -2))
# Monday is weekday=0
assert (frequencies.get_freq_code(offsets.Week(weekday=1)) ==
(frequencies.get_freq('W-TUE'), 1))
assert (frequencies.get_freq_code(offsets.Week(3, weekday=0)) ==
(frequencies.get_freq('W-MON'), 3))
assert (frequencies.get_freq_code(offsets.Week(-2, weekday=4)) ==
(frequencies.get_freq('W-FRI'), -2))
def test_frequency_misc(self):
assert (resolution.get_freq_group('T') ==
frequencies.FreqGroup.FR_MIN)
code, stride = frequencies.get_freq_code(offsets.Hour())
assert code == frequencies.FreqGroup.FR_HR
code, stride = frequencies.get_freq_code((5, 'T'))
assert code == frequencies.FreqGroup.FR_MIN
assert stride == 5
offset = offsets.Hour()
result = frequencies.to_offset(offset)
assert result == offset
result = frequencies.to_offset((5, 'T'))
expected = offsets.Minute(5)
assert result == expected
with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
frequencies.get_freq_code((5, 'baz'))
with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
frequencies.to_offset('100foo')
with tm.assert_raises_regex(ValueError, 'Could not evaluate'):
frequencies.to_offset(('', ''))
_dti = DatetimeIndex
class TestFrequencyInference(object):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
pytest.raises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
pytest.raises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['01/01/1999', '1/4/1999', '1/5/1999'])
assert frequencies.infer_freq(index) == 'B'
def test_business_daily_look_alike(self):
# GH 16624, do not infer 'B' when 'weekend' (2-day gap) in wrong place
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
assert frequencies.infer_freq(index) is None
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(index) == 'D'
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(dates) == 'D'
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
assert frequencies.infer_freq(index) == exp_freq
index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range(
3)])
assert frequencies.infer_freq(index) is None
index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta *
7])
assert frequencies.infer_freq(index) is None
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
pytest.raises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
        # All of these dates are on the same day of the week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29",
"2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
assert rng.inferred_freq == 'M'
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
assert rng.inferred_freq == 'A-JAN'
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-DEC'
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-NOV'
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-OCT'
def test_infer_freq_tz(self):
freqs = {'AS-JAN':
['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT':
['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT':
['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00',
'2012-01-01 00:00', '2012-01-01 01:00']}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
assert idx.inferred_freq == expected
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], # Fall DST
['2014-03-08', '2014-03-11'], # Spring DST
['2014-01-01', '2014-01-03']] # Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U',
'3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[
1], freq=freq, tz=tz)
assert idx.inferred_freq == freq
index = date_range("2013-11-03", periods=5,
freq="3H").tz_localize("America/Chicago")
assert index.inferred_freq is None
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
assert idx.inferred_freq == 'H'
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00',
'2014-07-02 10:00', '2014-07-02 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00',
'2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00',
'2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00'])
assert idx.inferred_freq == 'BH'
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
assert rng.inferred_freq == '-1A-JAN'
def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
assert result == rng.inferred_freq
def test_invalid_index_types(self):
# test all index types
for i in [tm.makeIntIndex(10), tm.makeFloatIndex(10),
tm.makePeriodIndex(10)]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]:
pytest.raises(ValueError, lambda: frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03',
'2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03',
'2004-04']))
assert result == expected
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [Series(np.arange(10)), Series(np.arange(10.))]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# a non-convertible string
pytest.raises(ValueError, lambda: frequencies.infer_freq(
Series(['foo', 'bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013', periods=10, freq=freq))
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101', periods=10, freq=freq))
inferred = frequencies.infer_freq(s)
assert inferred == freq
s = Series(date_range('20130101', '20130110'))
inferred = frequencies.infer_freq(s)
assert inferred == 'D'
def test_legacy_offset_warnings(self):
freqs = ['WEEKDAY', 'EOM', 'W@MON', 'W@TUE', 'W@WED', 'W@THU',
'W@FRI', 'W@SAT', 'W@SUN', 'Q@JAN', 'Q@FEB', 'Q@MAR',
'A@JAN', 'A@FEB', 'A@MAR', 'A@APR', 'A@MAY', 'A@JUN',
'A@JUL', 'A@AUG', 'A@SEP', 'A@OCT', 'A@NOV', 'A@DEC',
'Y@JAN', 'WOM@1MON', 'WOM@2MON', 'WOM@3MON',
'WOM@4MON', 'WOM@1TUE', 'WOM@2TUE', 'WOM@3TUE',
'WOM@4TUE', 'WOM@1WED', 'WOM@2WED', 'WOM@3WED',
'WOM@4WED', 'WOM@1THU', 'WOM@2THU', 'WOM@3THU',
'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI',
'WOM@4FRI']
msg = INVALID_FREQ_ERR_MSG
for freq in freqs:
with tm.assert_raises_regex(ValueError, msg):
frequencies.get_offset(freq)
with tm.assert_raises_regex(ValueError, msg):
date_range('2011-01-01', periods=5, freq=freq)
|
bsd-3-clause
|
PredictiveScienceLab/GPy
|
GPy/examples/regression.py
|
8
|
18746
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
try:
from matplotlib import pyplot as pb
except:
pass
import numpy as np
import GPy
def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.kern.lengthscale = 10.
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def coregionalization_toy(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
return m
def coregionalization_sparse(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
pb.ylim(-3,)
return m
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
Perform Gaussian process regression on the latitude and longitude data
from the Mount Epomeo runs. Requires gpxpy to be installed on your system
to load in the data.
"""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.epomeo_gpx()
num_data_list = []
for Xpart in data['X']:
num_data_list.append(Xpart.shape[0])
num_data_array = np.array(num_data_list)
num_data = num_data_array.sum()
Y = np.zeros((num_data, 2))
t = np.zeros((num_data, 2))
start = 0
    for Xpart, index in zip(data['X'], range(len(data['X']))):
        end = start + Xpart.shape[0]
        t[start:end, :] = np.hstack((Xpart[:, 0:1],
                                     index*np.ones((Xpart.shape[0], 1))))
        Y[start:end, :] = Xpart[:, 1:3]
        # advance the write offset so each run fills its own block of t and Y
        start = end
num_inducing = 200
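    # Inducing inputs: spread evenly over the observed time range, each paired
    # with a randomly chosen output index.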
Z = np.hstack((np.linspace(t[:,0].min(), t[:, 0].max(), num_inducing)[:, None],
np.random.randint(0, 4, num_inducing)[:, None]))
k1 = GPy.kern.RBF(1)
k2 = GPy.kern.Coregionalize(output_dim=5, rank=5)
k = k1**k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed('.*variance', 1.)
m.inducing_inputs.constrain_fixed()
m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
m.optimize(max_iters=max_iters,messages=True)
return m
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
"""
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
"""
# Contour over a range of length scales and signal/noise ratios.
length_scales = np.linspace(0.1, 60., resolution)
log_SNRs = np.linspace(-3., 4., resolution)
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
# data['Y'] = data['Y'][0::2, :]
# data['X'] = data['X'][0::2, :]
data['Y'] = data['Y'] - np.mean(data['Y'])
lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.RBF)
if plot:
pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
ax = pb.gca()
pb.xlabel('length scale')
pb.ylabel('log_10 SNR')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Now run a few optimizations
models = []
optim_point_x = np.empty(2)
optim_point_y = np.empty(2)
np.random.seed(seed=seed)
for i in range(0, model_restarts):
# kern = GPy.kern.RBF(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
m.likelihood.variance = np.random.uniform(1e-3, 1)
optim_point_x[0] = m.rbf.lengthscale
optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
# optimize
if optimize:
m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
optim_point_x[1] = m.rbf.lengthscale
optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
if plot:
pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
models.append(m)
if plot:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
"""
Evaluate the GP objective function for a given data set for a range of
signal to noise ratios and a range of lengthscales.
    :data: a data set from the utils.datasets directory.
    :length_scales: a list of length scales to explore for the contour plot.
    :log_SNRs: a list of base-10 logarithm signal-to-noise ratios to explore for the contour plot.
    :kernel_call: a kernel constructor to use for the 'signal' portion of the data.
"""
lls = []
total_var = np.var(data['Y'])
kernel = kernel_call(1, variance=1., lengthscale=1.)
model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
for log_SNR in log_SNRs:
SNR = 10.**log_SNR
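        # Split the total variance into signal and noise so that
        # signal_var / noise_var == SNR and signal_var + noise_var == total_var.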
noise_var = total_var / (1. + SNR)
signal_var = total_var - noise_var
model.kern['.*variance'] = signal_var
model.likelihood.variance = noise_var
length_scale_lls = []
for length_scale in length_scales:
model['.*lengthscale'] = length_scale
length_scale_lls.append(model.log_likelihood())
lls.append(length_scale_lls)
return np.array(lls)
def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_100m_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.rbf.lengthscale = 10
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d_50()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
optimizer='scg'
x_len = 30
X = np.linspace(0, 10, x_len)[:, None]
f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]
kern = GPy.kern.RBF(1)
poisson_lik = GPy.likelihoods.Poisson()
laplace_inf = GPy.inference.latent_function_inference.Laplace()
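    # Counts are modelled with a Poisson likelihood; since it is non-Gaussian,
    # the posterior over the latent function is approximated with Laplace's method.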
# create simple GP Model
m = GPy.core.GP(X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf)
if optimize:
m.optimize(optimizer)
if plot:
m.plot()
# plot the real underlying rate function
pb.plot(X, np.exp(f_true), '--k', linewidth=2)
return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
m = GPy.models.GPRegression(X, Y, kernel)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
#kernel += GPy.kern.Bias(X.shape[1])
X_variance = np.ones(X.shape) * 0.5
m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(('Sum of squares error on test data: ' + str(sse)))
return m
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.silhouette()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
if optimize:
m.optimize(messages=True, max_iters=max_iters)
print(m)
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs
X = np.random.uniform(-3., 3., (num_samples, 1))
Y = np.sin(X) + np.random.randn(num_samples, 1) * 0.05
# construct kernel
rbf = GPy.kern.RBF(1)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
if checkgrad:
m.checkgrad()
if optimize:
m.optimize('tnc', max_iters=max_iters)
if plot:
m.plot()
return m
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True, nan=False):
"""Run a 2D example of a sparse GP regression."""
np.random.seed(1234)
X = np.random.uniform(-3., 3., (num_samples, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
if nan:
inan = np.random.binomial(1,.2,size=Y.shape)
Y[inan] = np.nan
# construct kernel
rbf = GPy.kern.RBF(2)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
    # set the kernel lengthscales to a sensible starting value
m['.*len'] = 2.
m.checkgrad()
# optimize
if optimize:
m.optimize('tnc', messages=1, max_iters=max_iters)
# plot
if plot:
m.plot()
print(m)
return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
fig, axes = pb.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
# sample inputs and outputs
S = np.ones((20, 1))
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
# likelihood = GPy.likelihoods.Gaussian(Y)
Z = np.random.uniform(-3., 3., (7, 1))
k = GPy.kern.RBF(1)
# create simple GP Model - no input uncertainty on this one
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
print(m)
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[1])
axes[1].set_title('with input uncertainty')
fig.canvas.draw()
print(m)
return m
def simple_mean_function(max_iters=100, optimize=True, plot=True):
"""
    The simplest possible mean function. No parameters, just a simple sinusoid.
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
mf.update_gradients = lambda a,b: None
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot(plot_limits=(-10,15))
return m
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
"""
A linear mean function with parameters that we'll learn alongside the kernel
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
mf = GPy.mappings.Linear(1,1)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot()
return m
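# A minimal usage sketch (an illustrative addition, not one of the original
# examples): each function above builds a model, optionally optimizes and
# plots it, and returns it, e.g.
#
#   import GPy.examples.regression as regression
#   m = regression.olympic_marathon_men(optimize=True, plot=True)
#   print(m)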
|
bsd-3-clause
|
ningchi/scikit-learn
|
sklearn/utils/validation.py
|
66
|
23629
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
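# A hedged usage sketch (not part of the original module): estimators typically
# call check_array at the start of fit, e.g.
#
#   X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
#
# which accepts CSR sparse input and converts anything else to a C-ordered,
# at-least-2D float64 array, raising on NaN or infinite values.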
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
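# Minimal usage sketch for check_X_y (illustrative only; the toy inputs below
# are not part of the original module). List inputs are converted to validated
# numpy arrays of consistent length:
#     >>> X_checked, y_checked = check_X_y([[1, 2], [3, 4]], [0, 1])
#     >>> X_checked.shape, y_checked.shape
#     ((2, 2), (2,))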
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
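# Sketch of the three accepted seed types (illustrative, not part of the
# original module):
#     >>> rng = check_random_state(0)                         # int -> new RandomState
#     >>> check_random_state(rng) is rng                      # RandomState -> returned as-is
#     True
#     >>> check_random_state(None) is np.random.mtrand._rand  # None -> global RandomState
#     True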
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
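# Illustrative use of check_symmetric on a toy non-symmetric matrix (not part
# of the original module): the returned array is the average of the input and
# its transpose.
#     >>> A_sym = check_symmetric(np.array([[1., 2.], [0., 1.]]),
#     ...                         raise_warning=False)
#     >>> float(A_sym[0, 1]), float(A_sym[1, 0])
#     (1.0, 1.0)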
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
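# Hedged usage sketch for check_is_fitted (the estimator objects below are
# hypothetical and not defined in this module):
#     check_is_fitted(unfitted_estimator, "coef_")
#     # -> raises NotFittedError before fit() has been called
#     check_is_fitted(fitted_estimator, ["coef_", "intercept_"], all_or_any=all)
#     # -> returns silently because all attributes are present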
|
bsd-3-clause
|
NDKoehler/DataScienceBowl2017_7th_place
|
dsb3/steps/include_nodule_distr.py
|
1
|
4656
|
import numpy as np
import json
from collections import OrderedDict
from scipy.spatial import distance
from numpy.linalg import eigh
from matplotlib import pyplot as plt
import sklearn.metrics
import xgboost as xgb
from .. import pipeline as pipe
import numpy.random as random
import sys
import pandas as pd
np.random.seed(132)
def run_xgboost(dtrain, dtest):
xg_params = {
"objective": "binary:logistic",
"booster" : "gbtree",
"eval_metric" : "logloss",
"eta": random.uniform(0.01, 0.3),
"max_depth": random.randint(2, 4),
"subsample": random.uniform(0.5, 0.95),
"colsample_bytree": random.uniform(0.5, 0.95),
"silent": 1,
"seed": 0,
"nthread" : 5
}
num_boost_round = 1000
early_stopping_rounds = 25
evallist = [(dtest, 'test')]
    bst = xgb.train(xg_params, dtrain, num_boost_round, evals=evallist,
                    early_stopping_rounds=early_stopping_rounds)
    #calculating prediction not necessary, score already saved
#predictions = bst.predict(dtest, ntree_limit = bst.best_ntree_limit)
#uses early stopping to determine optimal epoch
log_loss = bst.best_score
return log_loss, xg_params, bst
def logloss(prediction, label):
eps = 1e-7
prediction = np.maximum(np.minimum(prediction, 1-eps), eps)
return -np.mean( label*np.log(prediction) + (1-label)*np.log(1-prediction) ) # eval formula from kaggle
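# Worked example of the log loss above (made-up predictions, not from the
# original pipeline): for predictions [0.9, 0.1] and labels [1, 0] both terms
# reduce to log(0.9), so
#     logloss(np.array([0.9, 0.1]), np.array([1, 0])) ~= -log(0.9) ~= 0.105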
def sort_and_reverse_1Darray(array):
return np.sort(array)[::-1]
def load_data(lst_path, n_candidates):
# load lst
candidates_DF = pd.read_csv(lst_path, sep='\t', header=None)
candidates_DF = candidates_DF.rename(columns={0:'id',1:'label',2:'xxx', 3:'cand_score'})
patients_lst = list(np.random.permutation(list(set(candidates_DF['id'].str.split('_').str[0].values.tolist()))))
all_candidates_scores = np.zeros((len(patients_lst), n_candidates), dtype=np.float32)
all_labels = np.zeros((len(patients_lst)), dtype=np.float32)
for pa_cnt, patient in enumerate(patients_lst):
patient_scores = sort_and_reverse_1Darray(candidates_DF[candidates_DF['id'].str.split('_').str[0]==patient]['cand_score'].values)[:n_candidates]
labels = candidates_DF[candidates_DF['id'].str.split('_').str[0]==patient]['label'].values.tolist()
if len(list(set(labels)))!= 1:
print ('ERROR!!! with labels')
all_labels[pa_cnt] = labels[0]
all_candidates_scores[pa_cnt,:len(patient_scores)] = patient_scores.copy()
return np.array(patients_lst), all_candidates_scores, all_labels
def run(lists_to_predict, n_candidates=20, bin_size=0.05, kernel_width=0.2, xg_max_depth=2, xg_eta=1, xg_num_round=2, sample_submission_lst_path=None):
splits = [lst2pred.split('/')[-1].split('.')[0].split('_')[0] for lst2pred in lists_to_predict]
data = {}
for lst_cnt, lst_type in enumerate(splits):
data[lst_type] = {}
patients_lst, all_candidates_scores, all_labels = load_data(lists_to_predict[lst_cnt],n_candidates)
data[lst_type]['patients'] = patients_lst.copy()
data[lst_type]['all_candidates_scores'] = all_candidates_scores.copy()
data[lst_type]['all_labels'] = all_labels.copy()
dtrain = xgb.DMatrix(data['tr']['all_candidates_scores'].copy(), label=data['tr']['all_labels'].copy())
dtest = xgb.DMatrix(data['va']['all_candidates_scores'].copy(), label=data['va']['all_labels'].copy())
scores_params_bsts = []
for i in range(1000):
score_params_bst = run_xgboost(dtrain, dtest)
scores_params_bsts.append(score_params_bst)
sorted_by_score = sorted(scores_params_bsts, key=lambda tup: tup[0])
print("best_score: ", sorted_by_score[0][0])
print("best params: ", sorted_by_score[0][1])
best_bst = sorted_by_score[0][2]
print ('-----------------------')
# predict validation_lst
predictions_va = best_bst.predict(dtest, ntree_limit = best_bst.best_ntree_limit)
logloss_va = logloss(np.array(predictions_va),data['va']['all_labels'])
print ('\nvalidation_logloss',logloss_va)
#print ('pred_va',predictions_va)
print ('-----------------------')
# predict holdout
sample_submission = pd.read_csv(sample_submission_lst_path)
dpred = xgb.DMatrix(data['ho']['all_candidates_scores'].copy())
predictions_sub = best_bst.predict(dpred, ntree_limit = best_bst.best_ntree_limit)
for patient in sample_submission['id'].values.tolist():
sample_submission['cancer'][sample_submission['id']==patient] = float(predictions_sub[data['ho']['patients']==patient][0])
sample_submission.to_csv('submission.csv',index=False,columns=['id','cancer'])
|
mit
|
liyu1990/sklearn
|
sklearn/decomposition/tests/test_nmf.py
|
26
|
8544
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import (NMF, ProjectedGradientNMF,
non_negative_factorization)
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A)
data_sp = ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
|
bsd-3-clause
|
Mendeley/mrec
|
mrec/tests/test_base_recommender.py
|
3
|
2137
|
try:
import cPickle as pickle
except ImportError:
import pickle
import tempfile
import os
import numpy as np
from nose.tools import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from mrec.testing import get_random_coo_matrix
from mrec.base_recommender import BaseRecommender
class MyRecommender(BaseRecommender):
def __init__(self):
self.foo = np.ndarray(range(10))
self.description = 'my recommender'
def _create_archive(self):
tmp = self.foo
self.foo = None
m = pickle.dumps(self)
self.foo = tmp
return {'model':m,'foo':self.foo}
def _load_archive(self,archive):
self.foo = archive['foo']
def save_load(r):
f,path = tempfile.mkstemp(suffix='.npz')
r.save(path)
return BaseRecommender.load(path)
def check_read_description(r):
f,path = tempfile.mkstemp(suffix='.npz')
r.save(path)
d = BaseRecommender.read_recommender_description(path)
assert_equal(str(r),d)
def test_save_filepath_condition():
r = BaseRecommender()
invalid_filepath = 'no suffix'
assert_raises(ValueError,r.save,invalid_filepath)
def test_save_load():
r = save_load(BaseRecommender())
assert_equal(type(r),BaseRecommender)
r = MyRecommender()
r2 = save_load(r)
assert_equal(type(r2),type(r))
assert_array_equal(r2.foo,r.foo)
assert_equal(r2.description,r.description)
def test_read_recommender_description():
check_read_description(BaseRecommender())
check_read_description(MyRecommender())
def test_zero_known_item_scores():
train = get_random_coo_matrix().tocsr()
predictions = np.random.random_sample(train.shape)
r = BaseRecommender()
safe = r._zero_known_item_scores(predictions,train)
num_users,num_items = predictions.shape
for u in xrange(num_users):
for i in xrange(num_items):
if i in train[u].indices:
assert_less_equal(safe[u,i],0)
else:
assert_equal(safe[u,i],predictions[u,i])
|
bsd-3-clause
|
vortex-ape/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
10
|
9453
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
from sklearn.exceptions import ConvergenceWarning
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_fastica_convergence_fail():
# Test the FastICA algorithm on very simple data
# (see test_non_square_fastica).
# Ensure a ConvergenceWarning raised if the tolerance is sufficiently low.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
# Do fastICA with tolerance 0. to ensure failing convergence
ica = FastICA(algorithm="parallel", n_components=2, random_state=rng,
max_iter=2, tol=0.)
assert_warns(ConvergenceWarning, ica.fit, m.T)
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
def test_fastica_errors():
n_features = 3
n_samples = 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
w_init = rng.randn(n_features + 1, n_features + 1)
assert_raises_regex(ValueError, 'max_iter should be greater than 1',
FastICA, max_iter=0)
assert_raises_regex(ValueError, r'alpha must be in \[1,2\]',
fastica, X, fun_args={'alpha': 0})
assert_raises_regex(ValueError, 'w_init has invalid shape.+'
r'should be \(3L?, 3L?\)',
fastica, X, w_init=w_init)
assert_raises_regex(ValueError,
'Invalid algorithm.+must be.+parallel.+or.+deflation',
fastica, X, algorithm='pizza')
|
bsd-3-clause
|
vacaciones/vacaciones
|
Quandl/organize_bonds_data.py
|
1
|
5194
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 06 13:52:21 2016
@author: ialbuq01
"""
#get brazillian bonds from debentures.com.br (anbima)
import pandas as pd
import pandas.io.data as web
from string import maketrans
import numpy as np
from xlrd import open_workbook
import pickle
from re import sub
from decimal import Decimal
import math
import matplotlib.pyplot as plt
import copy
import datetime as dt
f = open('debentures.txt')
data = f.readlines()[2:]
f.close()
#this data is structured as follows: (date,company,code,isinN,volume,transactions,minPrice,medPrice,maxPrice,percPU)
#our data will be a list of companies, each represented by a dictionary. Each item in a dictionary will be a pandas object corresponding to a debt security!
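#illustrative sketch (made-up company and security codes) of the structure built below:
# ls_companies = [
#     {'name': 'COMPANY_A',
#      'CODE11': <pandas DataFrame indexed by trade date with columns
#                 ['volume', 'transaction', 'low', 'med', 'high', 'percPU']>,
#      'CODE12': <another DataFrame for a second debt security>},
#     ...
# ]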
ls_companies = [] #list
companies = []
pd_security = pd.DataFrame([])#pandas
for row,a_line in enumerate(data[1:]):
#first get the info
(date,company,code,isinN,volume,transactions,minPrice,medPrice,maxPrice,percPU) = a_line.strip().split('\t')
try:
Pperc = float(percPU[1:-1].replace(',' ,'.'))
except ValueError:
Pperc = np.nan
try:
Pmin = float(Decimal(sub(r'[^\d.]', '', minPrice[1:-1].translate(maketrans('.,',',.')))))
Pmed = float(Decimal(sub(r'[^\d.]', '', medPrice[1:-1].translate(maketrans('.,',',.')))))
Pmax = float(Decimal(sub(r'[^\d.]', '', maxPrice[1:-1].translate(maketrans('.,',',.')))))
except:
Pmin = float(Decimal(sub(r'[^\d.]', '', minPrice[1:-1]))*1000)
Pmed = float(Decimal(sub(r'[^\d.]', '', medPrice[1:-1]))*1000)
Pmax = float(Decimal(sub(r'[^\d.]', '', maxPrice[1:-1]))*1000)
debt_instrument = code
ls_values = [float(volume),float(transactions),Pmin,Pmed,Pmax,Pperc]
dt_date = dt.datetime.strptime(date,'%d/%m/%Y')
values = np.array(ls_values,ndmin = 2)
pd_newEntry = pd.DataFrame(values,index = [dt_date], columns = ['volume','transaction','low','med','high','percPU'])
#second we check if this company was already identified
try:
company_index = companies.index(company)
except ValueError:
companies.append(company)
        #this is the first time we read in this company: add a new dictionary for it and append it to the list of companies (ls_companies)
        #since the company was only just found, it has, so far, a single debt instrument
d_company = {'name': company, code : pd_newEntry}
ls_companies.append(d_company)
company_index = -1
if company_index != -1: #that means we need to look in the correct list index, and check whether or not this debt instrument already exists
#check if debt instrument already exists
if debt_instrument in ls_companies[company_index].keys():
ls_companies[company_index][debt_instrument] = ls_companies[company_index][debt_instrument].append(pd_newEntry)
else:
ls_companies[company_index][debt_instrument] = pd_newEntry
#once all the data was loaded let's fill up daily
ls_liquid_companies = []
liquid_companies = []
for idx_company,this_company in enumerate(ls_companies):
for each_debt in this_company.keys():
print each_debt
if each_debt != 'name':
this_company[each_debt] = this_company[each_debt].sort(ascending = True)
            #For a given debt instrument, if there are 10 or fewer entries or the last trade happened before 2016, just ignore it!
if this_company[each_debt].index.shape[0] > 10 and this_company[each_debt].index[-1] > dt.datetime(2016,1,1):
newtimeRange = pd.date_range(start = this_company[each_debt].index[0], end = this_company[each_debt].index[-1], freq = 'd')
this_company[each_debt] = this_company[each_debt].reindex(index = newtimeRange, columns = ['volume','transaction','low','med','high','percPU'], method = 'pad')
else:
print 'Debt instrument: ' + each_debt + ' was deleted'
this_company.pop(each_debt, None)
#check if all debt instruments were deleted. if that's the case let's delete the whole company
# if len(this_company.keys()) < 2:
# print 'Company : ' + this_company['name'] + ' was deleted due to either lack of data or securities that are not longer traded'
# ls_companies.pop(idx_company)
# companies.pop(idx_company)
if len(this_company.keys()) > 1:
        print 'Company : ' + this_company['name'] + ' was added since it is actively traded into 2016'
ls_liquid_companies.append(ls_companies[idx_company])
liquid_companies.append(companies[idx_company])
del ls_companies
ls_companies = copy.copy(ls_liquid_companies)
pickle.dump((ls_companies), open('debentures.p','wb'))
|
mit
|
brguez/TEIBA
|
src/python/sourceElement_validationSamples.py
|
1
|
13301
|
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def genotypes2df(VCFObj):
"""
"""
donorGtList = []
## For each MEI in the VCF
for MEIObj in VCFObj.lineList:
# Create a series of genotype (donorId labeled)
end = (MEIObj.infoDict["BKPB"] if "BKPB" in MEIObj.infoDict else "UNK")
sourceElementId = MEIObj.chrom + ':' + str(MEIObj.pos) + '-' + str(end)
donorGt = pd.Series(MEIObj.genotypesDict, name=sourceElementId)
# Add the series to the list of series
donorGtList.append(donorGt)
## Merge line series into dataframe (row <- donor_ids, columns <- MEI_ids):
df1 = pd.concat(donorGtList, axis=1)
## Transpose dataframe (row <- MEI_ids, columns <- donor_ids)
df2 = df1.transpose()
return df2
def gt2binary(gtString):
"""
"""
genotype = gtString.split(':')[0]
    # A) Homozygous reference (unknown genotypes './.' are also treated as non-carrier)
if (genotype == '0') or (genotype == '0|0') or (genotype == '0/0') or (genotype == './.'):
boolean = 0
# B) Heterozygous or homozygous MEI (carrier/no_carrier)
else:
boolean = 1
return boolean
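# Illustrative mapping of gt2binary (genotype strings are examples, not taken
# from the input VCF):
#     gt2binary('0/0:30') -> 0   (homozygous reference)
#     gt2binary('./.:0')  -> 0   (unknown genotype, treated as non-carrier)
#     gt2binary('0/1:25') -> 1   (heterozygous carrier)
#     gt2binary('1/1:40') -> 1   (homozygous carrier)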
def series2binary(integer):
"""
"""
if (integer > 0):
boolean = 1
else:
boolean = 0
return boolean
def selectDonorSet(nbAbsentSrc, binaryGenotypes):
"""
"""
nbDonors = binaryGenotypes.shape[1]
nbSrcElements = binaryGenotypes.shape[0]
percCovered = 0
accumulatedSeries = pd.Series([0] * nbSrcElements, index=binaryGenotypes.index)
for iteration in range(1, (nbDonors + 1)):
print "** Iteration nb. ", iteration, " **"
bestDonor, newPercCovered, accumulatedSeries = selectBestDonor(nbAbsentSrc, percCovered, accumulatedSeries, binaryGenotypes)
        # a) Coverage increased: keep this donor and continue
if (newPercCovered > percCovered):
percCovered = newPercCovered
selectedDonorList.append(bestDonor)
## Discard selected donor from the dataframe;
binaryGenotypes = binaryGenotypes.drop(bestDonor, axis=1)
print "tioo: ", percCovered, len(selectedDonorList), selectedDonorList
        # b) No coverage increase: stop
else:
print "Stop! No percentage increase"
break
def selectBestDonor(nbAbsentSrc, percCovered, accumulatedSeries, binaryGenotypes):
"""
"""
# A key per donorId. The value will be the percentage of source elements covered after adding the candidate donors to the list of selected donors.
percCoveredDict = {}
# A key per donorId. The value will be a series containing for each source element its binary status (1: covered by the selected set of donors and 0: not covered )
unionSeriesDict = {}
for donorId in binaryGenotypes:
candidateDonorSeries = binaryGenotypes[donorId]
tmpSeries = accumulatedSeries.add(candidateDonorSeries)
unionSeries = tmpSeries.apply(series2binary)
unionSeriesDict[donorId] = unionSeries
nbCoveredCandidate = candidateDonorSeries.sum() # not needed
nbCoveredAccumulated = unionSeries.sum()
percCoveredDict[donorId] = float(nbCoveredAccumulated)/float(nbAbsentSrc)*100
## Select the donor contributing to the highest percentage of covered source elements
# Note: if there are several possible donors, select one randomly
bestDonor = max(percCoveredDict, key=percCoveredDict.get)
print "bestDonor: ", bestDonor, percCoveredDict[bestDonor]
return(bestDonor, percCoveredDict[bestDonor], unionSeriesDict[bestDonor])
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('sourceElementGt', help='')
parser.add_argument('donorMetadata', help='')
parser.add_argument('sourceElementMetadata', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
sourceElementGt = args.sourceElementGt
donorMetadata = args.donorMetadata
sourceElementMetadata = args.sourceElementMetadata
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputVCF: ", sourceElementGt
print "donorMetadata: ", donorMetadata
print "sourceElementMetadata: ", sourceElementMetadata
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Read donor metadata file
#################################
# Initialize a dictionary with the following structure:
# - dict: key(donorId) -> projectCode
header("1. Read donor metadata file")
metadataFile = open(donorMetadata, 'r')
donorIdProjectCodeDict = {}
for line in metadataFile:
line = line.rstrip('\r\n')
if not line.startswith("#"):
line = line.split('\t')
donorId = line[0]
tumorType = line[9]
donorIdProjectCodeDict[donorId] = tumorType
#print "test: ", donorId, tumorType
#print "donorIdProjectCodeDict: ", donorIdProjectCodeDict
#### 2. Compute the allele count of each source element in EOPC-DE
###################################################################
## EOPC-DE is the tumor type with available samples for the validation of L1 source elements.
# Initialize a dictionary with the following structure:
# - dict1: key(sourceElementId) -> dict2: key1("alleleCount") -> value1(alleleCount value)
# key2("donorIdList") -> list of donor ids containing the insertion
# sourceElementId: chr:beg-end
header("2. Compute the allele count of each source element in EOPC-DE")
VCFObj = formats.VCF()
donorIdList = VCFObj.read_VCF_multiSample(sourceElementGt)
alleleCountsDict = {}
## For each MEI:
for MEIObj in VCFObj.lineList:
end = (MEIObj.infoDict["BKPB"] if "BKPB" in MEIObj.infoDict else "UNK")
sourceElementId = MEIObj.chrom + ':' + str(MEIObj.pos) + '-' + str(end)
print "** source element ** ", sourceElementId
## Initialize source element dictionary
alleleCountsDict[sourceElementId] = {}
alleleCountsDict[sourceElementId]["alleleCount"] = 0
alleleCountsDict[sourceElementId]["donorIdList"] = []
## For each donor:
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
#print donorId
## Project code available for current donors
if (donorId in donorIdProjectCodeDict):
projectCode = donorIdProjectCodeDict[donorId]
# Select EOPC-DE tumor types
if (projectCode == "EOPC-DE"):
#print "insertion-Gt: ", genotype
## A) Insertion absent in reference genome
if (MEIObj.alt == "<MEI>"):
# a) Heterozygous
if (genotype == "0/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 1
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# b) Homozygous alternative
elif (genotype == "1/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 2
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# Note c) possibility would be missing allele (./.)
#print "Insertion absent in reference genome", sourceElementId, donorId, projectCode, alleleCountsDict[sourceElementId]
## B) Insertion in reference genome and absent in donor genome
elif (MEIObj.ref == "<MEI>"):
#print "Insertion in reference genome", donorId, genotype, projectCode
# a) Heterozygous
if (genotype == "0/1"):
alleleCountsDict[sourceElementId]["alleleCount"] += 1
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
# b) Homozygous reference
elif (genotype == "0/0"):
alleleCountsDict[sourceElementId]["alleleCount"] += 2
alleleCountsDict[sourceElementId]["donorIdList"].append(donorId)
        # b) Project code not available for current donors (affects a few donors)
# I don't know why this happens... check later
else:
print "[ERROR] Unknown donor tumor type: ", donorId
#### 3. Make output file containing source element metadata + allele count
##############################################################################
header("3. Make output file containing source element metadata + allele count")
metadataFile = open(sourceElementMetadata, 'r')
# Open output file
outFilePath = outDir + '/sourceElements_alleleCount_EOPCDE.tsv'
outFile = open(outFilePath, 'w')
# Write header:
row = '#cytobandId' + "\t" + 'sourceIdNew' + "\t" + 'sourceIdOld' + "\t" + 'novelty' + "\t" + 'activityStatus' + "\t" + 'alleleCount' + "\t" + 'donorIdList' + "\n"
outFile.write(row)
for line in metadataFile:
line = line.rstrip('\r\n')
if not line.startswith("#"):
line = line.split('\t')
print "line: ", line
cytobandId, sourceIdNew, sourceIdOld, novelty, activityStatus = line
        ## If there is an inconsistency between source element identifiers, report it and skip
        # Problem only affects one element
        if sourceIdNew not in alleleCountsDict:
            print "[ERROR] source element coordinate not found: ", sourceIdNew
            continue
alleleCount = alleleCountsDict[sourceIdNew]["alleleCount"]
donorIdList = (','.join(alleleCountsDict[sourceIdNew]["donorIdList"]) if len(alleleCountsDict[sourceIdNew]["donorIdList"]) > 0 else "-")
row = cytobandId + "\t" + sourceIdNew + "\t" + sourceIdOld + "\t" + novelty + "\t" + activityStatus + "\t" + str(alleleCount) + "\t" + donorIdList + "\n"
outFile.write(row)
#### 4. Make genotyping binary matrix for EOPC-DE donors
##########################################################
# The binary matrix will only contain those source elements
# that are absent in the reference genome
## carrier: 1, no_carrier: 0
header("4. Make genotyping binary matrix for EOPC-DE donors")
#### Select MEI absent in the reference genome
VCFAbsentObj = formats.VCF()
## For each MEI:
for MEIObj in VCFObj.lineList:
if (MEIObj.alt == "<MEI>"):
VCFAbsentObj.addLine(MEIObj)
#### Make binary matrix for all the donors
gtAbsentDfPCAWG = genotypes2df(VCFAbsentObj)
gtAbsentBinaryDfPCAWG = gtAbsentDfPCAWG.applymap(gt2binary)
#### Filter binary matrix selecting EOPC-DE donors
# print "gtBinaryDfPCAWG: ", gtBinaryDfPCAWG
## Make list with EOPC donor ids
EOPCdonorIdList = []
for donorId in donorIdProjectCodeDict:
projectCode = donorIdProjectCodeDict[donorId]
# Select EOPC-DE tumor types
if (projectCode == "EOPC-DE"):
EOPCdonorIdList.append(donorId)
binaryGtAbsentSrcEOPCdf = gtAbsentBinaryDfPCAWG[EOPCdonorIdList]
#### 5. Find the collection of donors maximizing the number of
################################################################
# source elements absent in the reference genome
##################################################
header("5. Find the collection of donors maximizing the number of source elements absent in the reference genome")
nbAbsentSrc = len(VCFAbsentObj.lineList)
selectedDonorList = []
print "nbAbsentSrc: ", nbAbsentSrc
selectDonorSet(nbAbsentSrc, binaryGtAbsentSrcEOPCdf)
#### 6. Report the number of source L1 in each EOPC
####################################################
# Open output file
outFilePath = outDir + '/donorId_nbSourceL1_EOPCDE.tsv'
outFile = open(outFilePath, 'w')
for donorId in binaryGtAbsentSrcEOPCdf:
sourceL1list = [ sourceId for sourceId, active in binaryGtAbsentSrcEOPCdf[donorId].iteritems() if active == 1]
nbSourceL1 = len(sourceL1list)
sourceL1Str = ",".join(sourceL1list)
row = donorId + "\t" + str(nbSourceL1) + "\t" + sourceL1Str + "\n"
outFile.write(row)
# medianNVHom = np.median([float(genotype[1]) for genotype in genotypesList if genotype[0] == '1/1'])
#binaryGtAbsentSrcEOPCdf = binaryGtAbsentSrcEOPCdf
#print "binaryGtAbsentSrcEOPCdf: ", binaryGtAbsentSrcEOPCdf
####
header("Finished")
|
gpl-3.0
|
kshedstrom/pyroms
|
examples/Palau_HYCOM/remap.py
|
1
|
4890
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap(src_file, src_varname, src_grd, dst_grd, dxy=20, cdepth=0, kk=0, dst_dir='./'):
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
# ref = datetime(1900, 1, 1, 0, 0, 0)
# ref = date2num(ref)
# tag = src_file.rsplit('/')[-1].rsplit('_')[-2].rsplit('-')[0]
# print tag
# year = int(tag[:4])
# month = int(tag[4:6])
# day = int(tag[6:])
# time = datetime(year, month, day, 0, 0, 0)
# time = date2num(time)
# time = time - ref
# time = time + 2.5 # 5-day average
cdf = netCDF.Dataset(src_file)
src_var = cdf.variables[src_varname][0]
time = cdf.variables['ocean_time'][0]
# create IC file
dst_file = src_file.rsplit('/')[-1]
dst_file = dst_dir + dst_file[:-3] + '_' + src_varname + '_ic_' + dst_grd.name + '.nc'
print '\nCreating file', dst_file
if os.path.exists(dst_file) is True:
os.remove(dst_file)
pyroms_toolbox.nc_create_roms_file(dst_file, dst_grd, nctime)
# open IC file
nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_var = cdf.variables[src_varname]
#get missing value
spval = src_var._FillValue
src_var = src_var[0]
# determine variable dimension
#ndim = len(src_var.dimensions)
ndim = len(src_var.shape)
if src_varname == 'ssh':
pos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_GLBa0.08_to_PALAU1_bilinear_t_to_rho.nc'
dst_varname = 'zeta'
dimensions = ('ocean_time', 'eta_rho', 'xi_rho')
long_name = 'free-surface'
units = 'meter'
field = 'free-surface, scalar, series'
elif src_varname == 'temp':
pos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_GLBa0.08_to_PALAU1_bilinear_t_to_rho.nc'
dst_varname = 'temp'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'potential temperature'
units = 'Celsius'
field = 'temperature, scalar, series'
elif src_varname == 'salt':
pos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_GLBa0.08_to_PALAU1_bilinear_t_to_rho.nc'
dst_varname = 'salt'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'salinity'
units = 'PSU'
field = 'salinity, scalar, series'
else:
raise ValueError, 'Undefined src_varname'
if ndim == 3:
# build intermediate zgrid
zlevel = -z[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in file
print 'Creating variable', dst_varname
nc.createVariable(dst_varname, 'f8', dimensions, fill_value=spval)
nc.variables[dst_varname].long_name = long_name
nc.variables[dst_varname].units = units
nc.variables[dst_varname].field = field
# remapping
print 'remapping', dst_varname, 'from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
if ndim == 3:
# flood the grid
print 'flood the grid'
src_varz = pyroms_toolbox.Grid_HYCOM.flood_fast(src_var, src_grd, pos=pos, spval=spval, \
dxy=dxy, cdepth=cdepth, kk=kk)
else:
src_varz = src_var
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
print 'about to call remap ' + wts_file
print src_varz.shape
dst_varz = pyroms.remapping.remap(src_varz, wts_file, \
spval=spval)
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_var = pyroms.remapping.z2roms(dst_varz[::-1,:,:], dst_grdz, \
dst_grd, Cpos=Cpos, spval=spval, flood=False)
else:
dst_var = dst_varz
# write data in destination file
print 'write data in destination file'
nc.variables['ocean_time'][0] = time
nc.variables[dst_varname][0] = dst_var
# close destination file
nc.close()
if src_varname == 'ssh':
return dst_varz
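# Illustrative call of remap() (the file name and the src_grd/dst_grd grid
# objects are hypothetical and assumed to be prepared elsewhere with
# pyroms/pyroms_toolbox):
#     dst_zeta = remap('hycom_GLBa0.08_ssh.nc', 'ssh', src_grd, dst_grd,
#                      dst_dir='./')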
|
bsd-3-clause
|
PeterRochford/SkillMetrics
|
skill_metrics/get_target_diagram_options.py
|
1
|
6350
|
def get_target_diagram_options(**kwargs):
'''
Get optional arguments for target_diagram function.
Retrieves the optional arguments supplied to the TARGET_DIAGRAM
function as a variable-length keyword argument list (*KWARGS), and
returns the values in an OPTION dictionary. Default values are
assigned to selected optional arguments. The function will terminate
with an error if an unrecognized optional argument is supplied.
INPUTS:
*kwargs : variable-length keyword argument list. The keywords by
definition are dictionaries with keys that must correspond to
              one of the choices given in OUTPUTS below.
OUTPUTS:
option : dictionary containing option values. (Refer to
display_target_diagram_options function for more information.)
option['alpha'] : blending of symbol face color (0.0
transparent through 1.0 opaque). (Default : 1.0)
option['axismax'] : maximum for the Bias & uRMSD axis
option['circlelinespec'] : circle line specification (default dashed
black, '--k')
option['circlelinewidth'] : circle line width specification (default 0.5)
option['circles'] : radii of circles to draw to indicate
isopleths of standard deviation (empty by default)
option['cmapzdata'] : data values to use for color mapping of
markers, e.g. RMSD or BIAS. (Default empty)
option['colormap'] : 'on'/'off' switch to map color shading of
markers to CMapZData values ('on') or min to
max range of CMapZData values ('off').
(Default : 'on')
option['equalAxes'] : 'on'/'off' switch to set axes to be equal
option['locationcolorbar'] : location for the colorbar, 'NorthOutside' or
'EastOutside'
option['markerdisplayed'] : markers to use for individual experiments
option['markerlabel'] : name of the experiment to use for marker
option['markerlabelcolor'] : marker label color (Default 'k')
option['markerlegend'] : 'on'/'off' switch to display marker legend
(Default 'off')
option['markersize'] : marker size (Default 10)
option['markersymbol'] : marker symbol (Default 'o')
option['normalized'] : statistics supplied are normalized with
respect to the standard deviation of reference
values (Default 'off')
option['obsUncertainty'] : Observational Uncertainty (default of 0)
option['overlay'] : 'on'/'off' switch to overlay current
statistics on Taylor diagram (Default 'off')
Only markers will be displayed.
option['ticks'] : define tick positions (default is that used
by the axis function)
option['titlecolorbar'] : title for the colorbar
option['xticklabelpos'] : position of the tick labels along the x-axis
(empty by default)
option['yticklabelpos'] : position of the tick labels along the y-axis
(empty by default)
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
prochford@thesymplectic.com
Created on Nov 25, 2016
'''
from skill_metrics import check_on_off
from matplotlib import rcParams
nargin = len(kwargs)
# Set default parameters for all options
option = {}
option['alpha'] = 1.0
option['axismax'] = 0.0
option['circlelinespec'] = 'k--'
option['circlelinewidth'] = rcParams.get('lines.linewidth')
option['circles'] = []
option['cmapzdata'] = []
option['colormap'] = 'on'
option['equalaxes'] = 'on'
option['locationcolorbar'] = 'NorthOutside'
option['markercolor'] = 'r'
option['markerdisplayed'] = 'marker'
option['markerlabel'] = ''
option['markerlabelcolor'] = 'k'
option['markerlegend'] = 'off'
option['markersize'] = 10
option['markersymbol'] = 'o'
option['normalized'] = 'off'
option['obsuncertainty'] = 0.0
option['overlay'] = 'off'
option['ticks'] = []
option['titlecolorbar'] = ''
option['xticklabelpos'] = []
option['yticklabelpos'] = []
if nargin == 0:
# No options requested, so return with only defaults
return option
# Check for valid keys and values in dictionary
for optname, optvalue in kwargs.items():
optname = optname.lower()
if optname == 'nonrmsdz':
raise ValueError('nonrmsdz is an obsolete option. Use cmapzdata instead.')
if not optname in option:
raise ValueError('Unrecognized option: ' + optname)
else:
# Replace option value with that from arguments
option[optname] = optvalue
# Check values for specific options
if optname == 'cmapzdata':
if isinstance(option[optname], str):
raise ValueError('cmapzdata cannot be a string!')
elif isinstance(option[optname], bool):
raise ValueError('cmapzdata cannot be a boolean!')
option['cmapzdata'] = optvalue
elif optname == 'equalaxes':
option['equalaxes'] = check_on_off(option['equalaxes'])
elif optname == 'markerlabel':
if type(optvalue) is list:
option['markerlabel'] = optvalue
elif type(optvalue) is dict:
option['markerlabel'] = optvalue
else:
raise ValueError('markerlabel value is not a list or dictionary: ' +
str(optvalue))
elif optname == 'markerlegend':
option['markerlegend'] = check_on_off(option['markerlegend'])
elif optname == 'normalized':
option['normalized'] = check_on_off(option['normalized'])
elif optname == 'overlay':
option['overlay'] = check_on_off(option['overlay'])
return option
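# Hedged usage sketch (the keyword values are made up; see the docstring above
# for the full list of supported options):
#     option = get_target_diagram_options(markerLabel=['Model A', 'Model B'],
#                                         markersize=12, overlay='on')
#     option['markersize']    # -> 12
#     option['markerlabel']   # -> ['Model A', 'Model B']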
|
gpl-3.0
|
imperial-genomics-facility/data-management-python
|
igf_data/utils/tools/ppqt_utils.py
|
1
|
5010
|
import os,subprocess
from shlex import quote
import pandas as pd
from igf_data.utils.fileutils import check_file_path, get_temp_dir,copy_local_file,remove_dir
class Ppqt_tools:
'''
A class for running Phantom quality control tools (PPQT)
'''
def __init__(self,rscript_path,ppqt_exe,threads=1,use_ephemeral_space=0):
self.rscript_path = rscript_path
self.ppqt_exe = ppqt_exe
self.threads = threads
self.use_ephemeral_space = use_ephemeral_space
def run_ppqt(self,input_bam,output_dir,output_spp_name,output_pdf_name):
'''
A method for running PPQT on input bam
:param input_bam: Input bam file
:param output_spp_name: Output spp out file
:param output_pdf_name: Output pdf plot
:param output_dir: Destination output dir
    :returns: PPQT run command as list, spp and pdf output paths and a list of dictionaries for the spp.out metrics
'''
try:
temp_dir = \
get_temp_dir(use_ephemeral_space=self.use_ephemeral_space)
run_cmd = \
self._pre_process(\
input_bam=input_bam,
output_spp_name=output_spp_name,
output_pdf_name=output_pdf_name,
output_dir=temp_dir,
temp_dir=temp_dir) # preprocess and fetch run cmd
subprocess.check_call(\
' '.join(run_cmd),
shell=True) # run ppqt and capture stdout
spp_output, pdf_output = \
self._post_process(\
output_spp_name=output_spp_name,
output_pdf_name=output_pdf_name,
output_dir=output_dir,
temp_dir=temp_dir) # copy files from temp dir
remove_dir(temp_dir) # clean up temp dir
spp_data = self._parse_spp_output(spp_file=spp_output)
return run_cmd,spp_output, pdf_output,spp_data
except:
raise
def _pre_process(self,input_bam,output_spp_name,output_pdf_name,output_dir,temp_dir):
'''
An internal method for preprocessing before the exe run
:param input_bam: Input bam file
:param output_spp_name: Output spp filename
:param output_pdf_name: Output pdf filename
:param output_dir: Destination output dir
:param temp_dir: Source temp dir
'''
try:
check_file_path(self.rscript_path)
check_file_path(self.ppqt_exe)
if not os.path.exists(output_dir):
os.makedirs(output_dir,mode=0o770)
output_pdf = os.path.join(temp_dir,output_pdf_name)
output_spp = os.path.join(temp_dir,output_spp_name)
run_cmd = \
[quote(self.rscript_path),
quote(self.ppqt_exe),
quote('-c={0}'.format(input_bam)),
quote('-rf'),
quote('-p={0}'.format(str(self.threads))),
quote('-savp={0}'.format(output_pdf)),
quote('-out={0}'.format(output_spp)),
quote('-tmpdir={0}'.format(temp_dir)),
quote('-odir={0}'.format(output_dir))]
return run_cmd
except:
raise
@staticmethod
def _post_process(output_spp_name,output_pdf_name,output_dir,temp_dir):
'''
A static method for post processing ppqt analysis
:param output_spp_name: Output spp filename
:param output_pdf_name: Output pdf filename
:param output_dir: Destination output dir
:param temp_dir: Source temp dir
:returns: spp output path and pdf output path
'''
try:
tmp_spp_file = os.path.join(temp_dir,output_spp_name)
dest_spp_file = os.path.join(output_dir,output_spp_name)
tmp_pdf_file = os.path.join(temp_dir,output_pdf_name)
dest_pdf_file = os.path.join(output_dir,output_pdf_name)
check_file_path(tmp_spp_file)
check_file_path(tmp_pdf_file)
copy_local_file(\
source_path=tmp_spp_file,
destinationa_path=dest_spp_file,
force=True)
copy_local_file(\
source_path=tmp_pdf_file,
destinationa_path=dest_pdf_file,
force=True)
return dest_spp_file,dest_pdf_file
except:
raise
@staticmethod
def _parse_spp_output(spp_file):
'''
    An internal static method for parsing the PPQT spp.out file
    :param spp_file: A spp.out filepath
    :returns: A list of dictionaries
'''
try:
check_file_path(spp_file)
column_names = \
["PPQT_Filename",
"PPQT_numReads",
"PPQT_estFragLen",
"PPQT_corr_estFragLen",
"PPQT_PhantomPeak",
"PPQT_corr_phantomPeak",
"PPQT_argmin_corr",
"PPQT_min_corr",
"PPQT_Normalized_SCC_NSC",
"PPQT_Relative_SCC_RSC",
"PPQT_QualityTag"]
data = \
pd.read_csv(\
spp_file,
sep='\t',
dtype=object,
names=column_names)
return data.to_dict(orient='records')
except Exception as e:
raise ValueError('Failed to parse file {0}, got error {1}'.\
format(spp_file,e))
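# A hedged usage sketch of the class above; the Rscript binary, the
# phantompeakqualtools run_spp.R location, the bam path and the output file
# names below are hypothetical placeholders, not part of the original module.
#
#   ppqt = Ppqt_tools(
#     rscript_path='/usr/bin/Rscript',
#     ppqt_exe='/opt/phantompeakqualtools/run_spp.R',
#     threads=4)
#   run_cmd, spp_out, pdf_out, spp_metrics = \
#     ppqt.run_ppqt(
#       input_bam='/path/to/sample.bam',
#       output_dir='/path/to/ppqt_output',
#       output_spp_name='sample.spp.out',
#       output_pdf_name='sample.pdf')
#   print(spp_metrics[0].get('PPQT_QualityTag'))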
|
apache-2.0
|
simonsfoundation/CaImAn
|
demos/obsolete/1_1/demo_caiman_basic_1_1.py
|
2
|
6172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stripped demo for running the CNMF source extraction algorithm with CaImAn and
evaluating the components. The analysis can be run either on the whole FOV
or in patches. For a complete pipeline (including motion correction)
check demo_pipeline.py
Data courtesy of W. Yang, D. Peterka and R. Yuste (Columbia University)
This demo is designed to be run under spyder or jupyter; its plotting functions
are tailored for that environment.
@authors: @agiovann and @epnev
"""
from __future__ import print_function
from builtins import range
import cv2
try:
cv2.setNumThreads(0)
except Exception:
pass
try:
if __IPYTHON__:
print("Detected iPython")
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from copy import deepcopy
import caiman as cm
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.paths import caiman_datadir
from caiman.source_extraction.cnmf import params as params
#%%
def main():
pass # For compatibility between running under Spyder and the CLI
#%% start a cluster
c, dview, n_processes =\
cm.cluster.setup_cluster(backend='local', n_processes=None,
single_thread=False)
#%% save files to be processed
# This datafile is distributed with Caiman
fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
    # location of dataset (can actually be a list of files to be concatenated)
add_to_movie = -np.min(cm.load(fnames[0], subindices=range(200))).astype(float)
# determine minimum value on a small chunk of data
add_to_movie = np.maximum(add_to_movie, 0)
# if minimum is negative subtract to make the data non-negative
base_name = 'Yr'
fname_new = cm.save_memmap(fnames, dview=dview, base_name=base_name,
order='C',
add_to_movie=add_to_movie)
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
#%% play movie, press q to quit
play_movie = False
if play_movie:
cm.movie(images).play(fr=50, magnification=4, gain=3.)
#%% correlation image. From here infer neuron size and density
Cn = cm.movie(images).local_correlations(swap_dim=False)
plt.imshow(Cn, cmap='gray')
plt.title('Correlation Image')
#%% set up some parameters
is_patches = True # flag for processing in patches or not
fr = 10 # approximate frame rate of data
decay_time = 5.0 # length of transient
if is_patches: # PROCESS IN PATCHES AND THEN COMBINE
rf = 10 # half size of each patch
stride = 4 # overlap between patches
K = 4 # number of components in each patch
else: # PROCESS THE WHOLE FOV AT ONCE
rf = None # setting these parameters to None
stride = None # will run CNMF on the whole FOV
K = 30 # number of neurons expected (in the whole FOV)
gSig = [6, 6] # expected half size of neurons
merge_thresh = 0.80 # merging threshold, max correlation allowed
p = 2 # order of the autoregressive system
gnb = 2 # global background order
params_dict = {'fnames': fnames,
'fr': fr,
'decay_time': decay_time,
'rf': rf,
'stride': stride,
'K': K,
'gSig': gSig,
'merge_thr': merge_thresh,
'p': p,
'nb': gnb}
# opts = params.CNMFParams(dims=dims,
# method_init='greedy_roi', gSig=gSig,
# merge_thresh=merge_thresh, p=p, gnb=gnb, k=K,
# rf=rf, stride=stride, rolling_sum=False,
# fr=fr, decay_time=decay_time)
opts = params.CNMFParams(params_dict=params_dict)
#%% Now RUN CNMF
cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
cnm = cnm.fit(images)
#%% plot contour plots of components
cnm.estimates.plot_contours(img=Cn)
#%% copy into a new object and refit
cnm.dview = None
cnm2 = deepcopy(cnm)
cnm2.dview = dview
cnm2.params.set('patch', {'rf': None})
cnm2 = cnm2.fit(images)
#%% COMPONENT EVALUATION
# the components are evaluated in three ways:
# a) the shape of each component must be correlated with the data
# b) a minimum peak SNR is required over the length of a transient
# c) each shape passes a CNN based classifier (this will pick up only neurons
# and filter out active processes)
    min_SNR = 2.5 # peak SNR for accepted components (if above this, accept)
rval_thr = 0.90 # space correlation threshold (if above this, accept)
use_cnn = True # use the CNN classifier
min_cnn_thr = 0.95 # if cnn classifier predicts below this value, reject
cnm2.params.set('quality', {'min_SNR': min_SNR,
'rval_thr': rval_thr,
'use_cnn': use_cnn,
'min_cnn_thr': min_cnn_thr})
cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
#%% visualize selected and rejected components
cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
#%% visualize selected components
cnm2.estimates.view_components(images, idx=cnm2.estimates.idx_components, img=Cn)
#%% play movie with results
cnm2.estimates.play_movie(images, magnification=4)
#%% STOP CLUSTER and clean up log files
cm.stop_server(dview=dview)
log_files = glob.glob('Yr*_LOG_*')
for log_file in log_files:
os.remove(log_file)
#%%
# This is to mask the differences between running this demo in Spyder
# versus from the CLI
if __name__ == "__main__":
main()
|
gpl-2.0
|
timmie/cartopy
|
lib/cartopy/tests/mpl/test_gridliner.py
|
2
|
6239
|
# (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import matplotlib as mpl
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
try:
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_raises
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
from cartopy.tests import _proj4_version
from cartopy.tests.mpl import ImageTesting
@ImageTesting(['gridliner1'])
def test_gridliner():
ny, nx = 2, 4
plt.figure(figsize=(10, 10))
ax = plt.subplot(nx, ny, 1, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 2, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 3, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), color='blue', linestyle='-')
ax.gridlines(ccrs.OSGB())
ax = plt.subplot(nx, ny, 4, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.NorthPolarStereo(), alpha=0.5,
linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 5, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 6, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
ax.gridlines(alpha=0.5, linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 7, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 8,
projection=ccrs.Robinson(central_longitude=135))
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), alpha=0.5, linewidth=1.5, linestyle='-')
delta = 1.5e-2
plt.subplots_adjust(left=0 + delta, right=1 - delta,
top=1 - delta, bottom=0 + delta)
def test_gridliner_specified_lines():
xs = [0, 60, 120, 180, 240, 360]
ys = [-90, -60, -30, 0, 30, 60, 90]
ax = mock.Mock(_gridliners=[], spec=GeoAxes)
gl = GeoAxes.gridlines(ax, xlocs=xs, ylocs=ys)
assert isinstance(gl.xlocator, mticker.FixedLocator)
assert isinstance(gl.ylocator, mticker.FixedLocator)
assert gl.xlocator.tick_values(None, None).tolist() == xs
assert gl.ylocator.tick_values(None, None).tolist() == ys
# The tolerance on this test is particularly high because of the high number
# of text objects. A new testing strategy is needed for this kind of test.
@ImageTesting(['gridliner_labels'
if mpl.__version__ >= '1.5' else
'gridliner_labels_pre_mpl_1.5'])
def test_grid_labels():
plt.figure(figsize=(8, 10))
crs_pc = ccrs.PlateCarree()
crs_merc = ccrs.Mercator()
crs_osgb = ccrs.OSGB()
ax = plt.subplot(3, 2, 1, projection=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that adding labels to Mercator gridlines gives an error.
# (Currently can only label PlateCarree gridlines.)
ax = plt.subplot(3, 2, 2,
projection=ccrs.PlateCarree(central_longitude=180))
ax.coastlines()
with assert_raises(TypeError):
ax.gridlines(crs=crs_merc, draw_labels=True)
ax.set_title('Known bug')
gl = ax.gridlines(crs=crs_pc, draw_labels=True)
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlines = False
ax = plt.subplot(3, 2, 3, projection=crs_merc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that labelling the gridlines on an OSGB plot gives an error.
# (Currently can only draw these on PlateCarree or Mercator plots.)
ax = plt.subplot(3, 2, 4, projection=crs_osgb)
ax.coastlines()
with assert_raises(TypeError):
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 4, projection=crs_pc)
ax.coastlines()
gl = ax.gridlines(
crs=crs_pc, linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_bottom = True
gl.ylabels_right = True
gl.xlines = False
gl.xlocator = mticker.FixedLocator([-180, -45, 45, 180])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 15, 'color': 'gray'}
gl.xlabel_style = {'color': 'red'}
# trigger a draw at this point and check the appropriate artists are
# populated on the gridliner instance
FigureCanvasAgg(plt.gcf()).draw()
assert len(gl.xlabel_artists) == 4
assert len(gl.ylabel_artists) == 5
assert len(gl.xline_artists) == 0
ax = plt.subplot(3, 2, 5, projection=crs_pc)
ax.set_extent([-20, 10.0, 45.0, 70.0])
ax.coastlines()
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 6, projection=crs_merc)
ax.set_extent([-20, 10.0, 45.0, 70.0], crs=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Increase margins between plots to stop them bumping into one another.
plt.subplots_adjust(wspace=0.25, hspace=0.25)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
gpl-3.0
|
Clyde-fare/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the mean squared error and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
sandeepgupta2k4/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
5
|
55283
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
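# A minimal sketch of the behaviour above, using a hypothetical model_fn name
# (not part of the original module; assumes `import functools`):
#
#   def my_model_fn(features, labels, mode, params):
#     ...
#   _model_fn_args(my_model_fn)
#   # -> ('features', 'labels', 'mode', 'params')
#   _model_fn_args(functools.partial(my_model_fn, params={'learning_rate': 0.1}))
#   # -> ('features', 'labels', 'mode'): keyword-bound arguments are dropped.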
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
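# A hedged illustration of the `metrics` formats handled above (hypothetical
# friendly names and prediction keys). `MetricSpec` is the recommended form;
# the bare-function and tuple-keyed forms are the deprecated ones warned about
# in the loop above.
#
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(
#           metric_fn=metrics_lib.streaming_accuracy,
#           prediction_key='classes'),
#       # Deprecated: bare function, assumes single-tensor predictions/labels.
#       'mae': metrics_lib.streaming_mean_absolute_error,
#       # Deprecated: tuple key (friendly_name, prediction_name) for dict
#       # predictions; the metric receives predictions['head1_classes'].
#       ('head1_accuracy', 'head1_classes'): metrics_lib.streaming_accuracy,
#   }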
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long time
    to converge and you want to split up training into parts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
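  # A hedged usage sketch of predict() with as_iterable=True (hypothetical
  # input_fn name; assumes the estimator has already been fit):
  #
  #   for prediction in estimator.predict(input_fn=my_predict_input_fn):
  #     ...  # one prediction per example, yielded until the inputs are exhausted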
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
  @abc.abstractmethod
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
            Estimators to be configured from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
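  # A minimal model_fn sketch matching the signatures documented above (a
  # hypothetical linear-regression example, not part of the original module;
  # assumes `import tensorflow as tf` and an 'x' feature of shape [batch, 1]):
  #
  #   def example_model_fn(features, labels, mode, params):
  #     w = tf.get_variable('w', shape=[1, 1])
  #     predictions = tf.matmul(features['x'], w)
  #     loss, train_op = None, None
  #     if mode != model_fn_lib.ModeKeys.INFER:
  #       loss = tf.reduce_mean(tf.square(predictions - labels))
  #     if mode == model_fn_lib.ModeKeys.TRAIN:
  #       train_op = tf.train.GradientDescentOptimizer(
  #           params['learning_rate']).minimize(
  #               loss, global_step=tf.train.get_global_step())
  #     return model_fn_lib.ModelFnOps(
  #         mode=mode, predictions=predictions, loss=loss, train_op=train_op)
  #
  #   estimator = Estimator(model_fn=example_model_fn,
  #                         params={'learning_rate': 0.01})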
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# For the duration of the x,y deprecation in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
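# ---------------------------------------------------------------------------
# Editorial usage sketch (illustrative, not part of the original module).
# SKCompat is meant to drive an Estimator with in-memory numpy arrays.
# Assuming `my_model_fn` is a hypothetical model_fn following the
# (features, labels, mode) contract and `Estimator` is the estimator class
# defined earlier in this module, a typical session would look roughly like:
#
#   est = SKCompat(Estimator(model_fn=my_model_fn, model_dir='/tmp/skcompat_example'))
#   est.fit(x_train, y_train, batch_size=128, steps=200)
#   eval_results = est.score(x_test, y_test, batch_size=128)
#   predictions = est.predict(x_test, batch_size=128)
# ---------------------------------------------------------------------------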
|
apache-2.0
|
mikebenfield/scikit-learn
|
examples/linear_model/plot_logistic.py
|
73
|
1568
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or class two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# Generate a toy dataset: a noisy 1-D feature whose sign determines
# the binary class label
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
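# (Editorial addition, illustrative) quick sanity checks on the sigmoid defined
# above: it equals 0.5 at 0 and is symmetric about that point.
assert abs(model(0.0) - 0.5) < 1e-12
assert abs(model(2.0) + model(-2.0) - 1.0) < 1e-12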
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
|
bsd-3-clause
|
bsipocz/ginga
|
examples/matplotlib/example1_mpl.py
|
1
|
5859
|
#! /usr/bin/env python
#
# example1_mpl.py -- Simple, configurable FITS viewer using a matplotlib
# QtAgg backend for Ginga and embedded in a Qt program.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Usage:
example1_mpl.py [fits file]
"""
from __future__ import print_function
import sys, os
from ginga import AstroImage
from ginga.qtw.QtHelp import QtGui, QtCore
from matplotlib.figure import Figure
from ginga.mplw.ImageViewMpl import ImageViewZoom
from ginga.mplw.FigureCanvasQt import FigureCanvas
from ginga.misc import log
class FitsViewer(QtGui.QMainWindow):
def __init__(self, logger):
super(FitsViewer, self).__init__()
self.logger = logger
fig = Figure()
w = FigureCanvas(fig)
fi = ImageViewZoom(self.logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
#fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
fi.set_figure(fig)
self.fitsimage = fi
fi.get_bindings().enable_all(True)
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
vbox.setSpacing(1)
vbox.addWidget(w, stretch=1)
self.readout = QtGui.QLabel("")
vbox.addWidget(self.readout, stretch=0,
alignment=QtCore.Qt.AlignCenter)
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
wopen = QtGui.QPushButton("Open File")
wopen.clicked.connect(self.open_file)
wquit = QtGui.QPushButton("Quit")
wquit.clicked.connect(self.close)
hbox.addStretch(1)
for w in (wopen, wquit):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
vbox.addWidget(hw, stretch=0)
vw = QtGui.QWidget()
self.setCentralWidget(vw)
vw.setLayout(vbox)
fi.configure(512, 512)
def get_fitsimage(self):
return self.fitsimage
def load_file(self, filepath):
image = AstroImage.AstroImage()
image.load_file(filepath)
self.fitsimage.set_image(image)
def open_file(self):
res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0].encode('ascii')
else:
fileName = str(res)
self.load_file(fileName)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
self.load_file(fileName)
def motion(self, fitsimage, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = fitsimage.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = fitsimage.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warn("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.setText(text)
def main(options, args):
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication(args)
logger = log.get_logger(name="example1", options=options)
viewer = FitsViewer(logger)
viewer.resize(524, 540)
viewer.show()
app.setActiveWindow(viewer)
if len(args) > 0:
viewer.load_file(args[0])
app.exec_()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=None,
help="Set logging level to LEVEL")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
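# (Editorial note, illustrative) typical invocations of this example, assuming a
# hypothetical FITS file named 'm31.fits' in the working directory:
#   python example1_mpl.py m31.fits
#   python example1_mpl.py --loglevel=20 --stderr m31.fits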
|
bsd-3-clause
|
laranea/trading-with-python
|
nautilus/nautilus.py
|
77
|
5403
|
'''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done."
|
bsd-3-clause
|
blackecho/Deep-Learning-TensorFlow
|
yadlt/models/boltzmann/rbm.py
|
2
|
11013
|
"""Restricted Boltzmann Machine TensorFlow implementation."""
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from yadlt.core import Layers, Loss
from yadlt.core import UnsupervisedModel
from yadlt.utils import tf_utils, utilities
class RBM(UnsupervisedModel):
"""Restricted Boltzmann Machine implementation using TensorFlow.
The interface of the class is sklearn-like.
"""
def __init__(
self, num_hidden, visible_unit_type='bin',
name='rbm', loss_func='mse', learning_rate=0.01,
regcoef=5e-4, regtype='none', gibbs_sampling_steps=1,
batch_size=10, num_epochs=10, stddev=0.1):
"""Constructor.
:param num_hidden: number of hidden units
:param loss_function: type of loss function
:param visible_unit_type: type of the visible units (bin or gauss)
:param gibbs_sampling_steps: optional, default 1
:param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
"""
UnsupervisedModel.__init__(self, name)
self.loss_func = loss_func
self.learning_rate = learning_rate
self.num_epochs = num_epochs
self.batch_size = batch_size
self.regtype = regtype
self.regcoef = regcoef
self.loss = Loss(self.loss_func)
self.num_hidden = num_hidden
self.visible_unit_type = visible_unit_type
self.gibbs_sampling_steps = gibbs_sampling_steps
self.stddev = stddev
self.W = None
self.bh_ = None
self.bv_ = None
self.w_upd8 = None
self.bh_upd8 = None
self.bv_upd8 = None
self.cost = None
self.input_data = None
self.hrand = None
self.vrand = None
def _train_model(self, train_set, train_ref=None, validation_set=None,
validation_ref=None):
"""Train the model.
:param train_set: training set
:param validation_set: validation set. optional, default None
:return: self
"""
pbar = tqdm(range(self.num_epochs))
for i in pbar:
self._run_train_step(train_set)
if validation_set is not None:
feed = self._create_feed_dict(validation_set)
err = tf_utils.run_summaries(
self.tf_session, self.tf_merged_summaries,
self.tf_summary_writer, i, feed, self.cost)
pbar.set_description("Reconstruction loss: %s" % (err))
def _run_train_step(self, train_set):
"""Run a training step.
A training step is made by randomly shuffling the training set,
divide into batches and run the variable update nodes for each batch.
:param train_set: training set
:return: self
"""
np.random.shuffle(train_set)
batches = [_ for _ in utilities.gen_batches(train_set,
self.batch_size)]
updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]
for batch in batches:
self.tf_session.run(updates,
feed_dict=self._create_feed_dict(batch))
def _create_feed_dict(self, data):
"""Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform)
"""
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
self.vrand: np.random.rand(data.shape[0], data.shape[1])
}
def build_model(self, n_features, regtype='none'):
"""Build the Restricted Boltzmann Machine model in TensorFlow.
:param n_features: number of features
:param regtype: regularization type
:return: self
"""
self._create_placeholders(n_features)
self._create_variables(n_features)
self.encode = self.sample_hidden_from_visible(self.input_data)[0]
self.reconstruction = self.sample_visible_from_hidden(
self.encode, n_features)
hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
self.input_data, n_features)
positive = self.compute_positive_association(self.input_data,
hprob0, hstate0)
nn_input = vprob
for step in range(self.gibbs_sampling_steps - 1):
hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
nn_input, n_features)
nn_input = vprob
negative = tf.matmul(tf.transpose(vprob), hprob1)
self.w_upd8 = self.W.assign_add(
self.learning_rate * (positive - negative) / self.batch_size)
self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
tf.subtract(hprob0, hprob1), 0)))
self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
tf.subtract(self.input_data, vprob), 0)))
variables = [self.W, self.bh_, self.bv_]
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
def _create_placeholders(self, n_features):
"""Create the TensorFlow placeholders for the model.
:param n_features: number of features
:return: self
"""
self.input_data = tf.placeholder(tf.float32, [None, n_features],
name='x-input')
self.hrand = tf.placeholder(tf.float32, [None, self.num_hidden],
name='hrand')
self.vrand = tf.placeholder(tf.float32, [None, n_features],
name='vrand')
# not used in this model, created just to comply with
# unsupervised_model.py
self.input_labels = tf.placeholder(tf.float32)
self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')
def _create_variables(self, n_features):
"""Create the TensorFlow variables for the model.
:param n_features: number of features
:return: self
"""
w_name = 'weights'
self.W = tf.Variable(tf.truncated_normal(
shape=[n_features, self.num_hidden], stddev=0.1), name=w_name)
tf.summary.histogram(w_name, self.W)
bh_name = 'hidden-bias'
self.bh_ = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]),
name=bh_name)
tf.summary.histogram(bh_name, self.bh_)
bv_name = 'visible-bias'
self.bv_ = tf.Variable(tf.constant(0.1, shape=[n_features]),
name=bv_name)
tf.summary.histogram(bv_name, self.bv_)
def gibbs_sampling_step(self, visible, n_features):
"""Perform one step of gibbs sampling.
:param visible: activations of the visible units
:param n_features: number of features
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
"""
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs, n_features)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1
def sample_hidden_from_visible(self, visible):
"""Sample the hidden units from the visible units.
This is the Positive phase of the Contrastive Divergence algorithm.
:param visible: activations of the visible units
:return: tuple(hidden probabilities, hidden binary states)
"""
hprobs = tf.nn.sigmoid(tf.add(tf.matmul(visible, self.W), self.bh_))
hstates = utilities.sample_prob(hprobs, self.hrand)
return hprobs, hstates
def sample_visible_from_hidden(self, hidden, n_features):
"""Sample the visible units from the hidden units.
This is the Negative phase of the Contrastive Divergence algorithm.
:param hidden: activations of the hidden units
:param n_features: number of features
:return: visible probabilities
"""
visible_activation = tf.add(
tf.matmul(hidden, tf.transpose(self.W)),
self.bv_
)
if self.visible_unit_type == 'bin':
vprobs = tf.nn.sigmoid(visible_activation)
elif self.visible_unit_type == 'gauss':
vprobs = tf.truncated_normal(
(1, n_features), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs
def compute_positive_association(self, visible,
hidden_probs, hidden_states):
"""Compute positive associations between visible and hidden units.
:param visible: visible units
:param hidden_probs: hidden units probabilities
:param hidden_states: hidden units states
:return: positive association = dot(visible.T, hidden)
"""
if self.visible_unit_type == 'bin':
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif self.visible_unit_type == 'gauss':
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive
def load_model(self, shape, gibbs_sampling_steps, model_path):
"""Load a trained model from disk.
The shape of the model (num_visible, num_hidden) and the number
of gibbs sampling steps must be known in order to restore the model.
:param shape: tuple(num_visible, num_hidden)
:param gibbs_sampling_steps:
:param model_path:
:return: self
"""
n_features, self.num_hidden = shape[0], shape[1]
self.gibbs_sampling_steps = gibbs_sampling_steps
self.build_model(n_features)
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
self.tf_session.run(init_op)
self.tf_saver.restore(self.tf_session, model_path)
def get_parameters(self, graph=None):
"""Return the model parameters in the form of numpy arrays.
:param graph: tf graph object
:return: model parameters
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {
'W': self.W.eval(),
'bh_': self.bh_.eval(),
'bv_': self.bv_.eval()
}
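# ---------------------------------------------------------------------------
# Editorial usage sketch (illustrative, not part of the library). The class
# docstring advertises an sklearn-like interface; under the assumption that the
# UnsupervisedModel base class provides the usual fit/transform helpers, a
# session would look roughly like this (`train_X` and `val_X` are hypothetical
# numpy arrays with values scaled to [0, 1]):
#
#   rbm = RBM(num_hidden=250, learning_rate=0.01, num_epochs=10, batch_size=32)
#   rbm.fit(train_X, validation_set=val_X)
#   hidden_codes = rbm.transform(val_X)   # assumed base-class helper
#   params = rbm.get_parameters()         # {'W': ..., 'bh_': ..., 'bv_': ...}
# ---------------------------------------------------------------------------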
|
mit
|
sergiohzlz/lectorcfdi
|
extrainfo.py
|
1
|
8345
|
#!/usr/bin/python
#-*-coding:utf8-*-
from bs4 import BeautifulSoup as Soup
#import pandas as pd
import glob
import sys
import re
"""
XML version of CFDI 3.3
"""
class CFDI(object):
def __init__(self, f):
"""
Constructor that takes as its parameter a string with the name of the
CFDI file.
"""
fxml = open(f,'r').read()
soup = Soup(fxml,'lxml')
#============cfdi components=================
emisor = soup.find('cfdi:emisor')
receptor = soup.find('cfdi:receptor')
comprobante = soup.find('cfdi:comprobante')
tfd = soup.find('tfd:timbrefiscaldigital')
self.__version = comprobante['version']
self.__folio = comprobante['folio']
self.__uuid = tfd['uuid']
self.__fechatimbrado = tfd['fechatimbrado']
self.__traslados = soup.find_all(lambda e: e.name=='cfdi:traslado' and
sorted(e.attrs.keys())==['importe','impuesto','tasaocuota','tipofactor'])
self.__retenciones = soup.find_all(lambda e: e.name=='cfdi:retencion' and
sorted(e.attrs.keys())==['importe','impuesto'])
#============emisor==========================
self.__emisorrfc = emisor['rfc']
try:
self.__emisornombre = emisor['nombre']
except:
self.__emisornombre = emisor['rfc']
#============receptor========================
self.__receptorrfc = receptor['rfc']
try:
self.__receptornombre = receptor['nombre']
except:
self.__receptornombre = receptor['rfc']
#============comprobante=====================
self.__certificado = comprobante['certificado']
self.__sello = comprobante['sello']
self.__total = round(float(comprobante['total']),2)
self.__subtotal = round(float(comprobante['subtotal']),2)
self.__fecha_cfdi = comprobante['fecha']
self.__conceptos = soup.find_all(lambda e: e.name=='cfdi:concepto')
self.__n_conceptos = len(self.__conceptos)
try:
self.__moneda = comprobante['moneda']
except KeyError as k:
self.__moneda = 'MXN'
try:
self.__lugar = comprobante['lugarexpedicion']
except KeyError as k:
self.__lugar = u'México'
tipo = comprobante['tipodecomprobante']
if(float(self.__version)==3.2):
self.__tipo = tipo
else:
tcomprobantes = {'I':'Ingreso', 'E':'Egreso', 'N':'Nomina', 'P':'Pagado'}
self.__tipo = tcomprobantes[tipo]
try:
self.__tcambio = float(comprobante['tipocambio'])
except:
self.__tcambio = 1.
triva, trieps, trisr = self.__calcula_traslados()
self.__triva = round(triva,2)
self.__trieps = round(trieps,2)
self.__trisr = round(trisr,2)
retiva, retisr = self.__calcula_retenciones()
self.__retiva = round(retiva,2)
self.__retisr = round(retisr,2)
def __str__(self):
"""
Prints the CFDI fields in the following order:
emisor, fecha de timbrado, tipo de comprobante, rfc emisor, uuid,
receptor, rfc receptor, subtotal, ieps, iva, retiva, retisr, tc, total
"""
respuesta = '\t'.join( map(str, self.lista_valores))
return respuesta
def __calcula_traslados(self):
triva, trieps, trisr = 0., 0., 0.
for t in self.__traslados:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if impuesto=='IVA':
triva += importe
elif impuesto=='ISR':
trisr += importe
elif impuesto=='IEPS':
trieps += importe
elif(self.__version=='3.3'):
if impuesto=='002':
triva += importe
elif impuesto=='001':
trisr += importe
elif impuesto=='003':
trieps += importe
return triva, trieps, trisr
def __calcula_retenciones(self):
retiva, retisr = 0., 0.
for t in self.__retenciones:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if(impuesto=='ISR'):
retisr += importe
elif(impuesto=='IVA'):
retiva += importe
elif(self.__version=='3.3'):
if(impuesto=='002'):
retiva += importe
elif(impuesto=='001'):
retisr += importe
return retiva, retisr
@property
def lista_valores(self):
v = [self.__emisornombre,self.__fechatimbrado, self.__tipo, self.__emisorrfc ]
v += [self.__uuid, self.__folio, self.__receptornombre, self.__receptorrfc ]
v += [self.__subtotal, self.__trieps, self.__triva]
v += [self.__retiva, self.__retisr, self.__tcambio, self.__total]
return v
@property
def dic_cfdi(self):
d = {}
d["Emisor"] = self.__emisornombre
d["Fecha_CFDI"] = self.__fechatimbrado
d["Tipo"] = self.__tipo
d["RFC_Emisor"] = self.__emisorrfc
d["Folio_fiscal"] = self.__uuid
d["Folio"] = self.__folio
d["Receptor"] = self.__receptornombre
d["RFC_Receptor"] = self.__receptorrfc
d["Subtotal"] = self.__subtotal
d["IEPS"] = self.__trieps
d["IVA"] = self.__triva
d["Ret IVA"] = self.__retiva
d["Ret ISR"] = self.__retisr
d["TC"] = self.__tcambio
d["Total"] = self.__total
return d
@property
def certificado(self):
return self.__certificado
@property
def sello(self):
return self.__sello
@property
def total(self):
return self.__total
@property
def subtotal(self):
return self.__subtotal
@property
def fechatimbrado(self):
return self.__fechatimbrado
@property
def tipodecambio(self):
return self.__tcambio
@property
def lugar(self):
return self.__lugar
@property
def moneda(self):
return self.__moneda
@property
def traslado_iva(self):
return self.__triva
@property
def traslado_isr(self):
return self.__trisr
@property
def traslado_ieps(self):
return self.__trieps
@property
def n_conceptos(self):
return self.__n_conceptos
@property
def conceptos(self):
return self.__conceptos
@property
def folio(self):
return self.__folio
@staticmethod
def columnas():
return ["Emisor","Fecha_CFDI","Tipo","RFC_Emisor","Folio_fiscal","Folio","Receptor",
"RFC_Receptor", "Subtotal","IEPS","IVA","Ret IVA","Ret ISR","TC","Total"]
@staticmethod
def imprime_reporte(nf, nr):
reporte = "Número de archivos procesados:\t {}\n".format(nf)
reporte += "Número de filas en tsv:\t {}\n".format(nr)
if(nf!=nr):
reporte += "\n\n**** Atención ****\n"
return reporte
L = glob.glob('./*.xml')
#R = [ patt[1:].strip().lower() for patt in re.findall('(<cfdi:[A-z]*\s|<tfd:[A-z]*\s)',fxml)]
if __name__=='__main__':
salida = sys.argv[1]
fout = open(salida,'w')
columnas = CFDI.columnas()
titulo = '\t'.join(columnas)+'\n'
fout.write(titulo)
nl = 0
for f in L:
try:
#print("abriendo {0}".format(f))
rcfdi = CFDI(f)
dic = rcfdi.dic_cfdi
vals = [dic[c] for c in columnas]
strvals = ' \t '.join(map(str, vals))+'\n'
fout.write(strvals)
nl += 1
except Exception:
# The original `assert` on a non-empty string could never fail; report the file instead.
print("Error processing file {0}".format(f))
fout.close()
nr = len(L)
rep = CFDI.imprime_reporte(nr, nl)
print(rep)
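# (Editorial note, illustrative) the script is driven from the command line, e.g.:
#   python extrainfo.py report.tsv
# which parses every *.xml CFDI in the current directory into one row of 'report.tsv'.
# A single invoice can also be inspected interactively, assuming a hypothetical
# 'factura.xml':
#   cfdi = CFDI('factura.xml')
#   print(cfdi.dic_cfdi)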
|
apache-2.0
|
tlhr/plumology
|
plumology/io/hdf.py
|
1
|
7625
|
"""hdf - HDF5 wrappers and utilities"""
from typing import (Any, Sequence, List, Mapping,
Callable, Union, Optional)
import h5py
import numpy as np
import pandas as pd
from .rw import read_plumed_fields
__all__ = ['plumed_to_h5', 'plumed_to_hdf', 'hdf_to_dataframe']
def plumed_to_hdf(
files: Union[List[str], str],
hdf_file: str,
keys: Union[List[str], str],
field_map: Optional[Mapping[str, str]]=None,
columnwise: bool=False,
overwrite: bool=False,
) -> None:
"""
Read PLUMED files and dump to HDF5.
Parameters
----------
files : Files generated by PLUMED.
hdf_file : Path to HDF file.
keys : Keys to be used for top-level HDF groups.
field_map : Mapping to replace potentially bad names for fields.
columnwise : Operate on columns instead of whole files,
this is easier on memory, but far slower.
overwrite : Overwrite an existing file, or just append.
"""
# Check input
if not isinstance(files, list):
files = [files]
if not isinstance(keys, list):
keys = [keys]
if len(keys) != len(files):
raise ValueError(
'You must supply the same number of keys as files!'
)
fmode = 'w' if overwrite else 'a'
with h5py.File(hdf_file, fmode) as store:
for key, file in zip(keys, files):
fields = read_plumed_fields(file)
# Replace fields with better names
if field_map is not None:
fields = [
field_map[f] if f in field_map else f for f in fields
]
# Prepare hdf structure
grp = store.create_group(key)
# This is lighter on memory, but slower
if columnwise:
for field in fields:
column = pd.read_csv(
file,
header=None,
comment='#',
names=fields,
sep=r'\s+',
dtype=np.float64,
usecols=[field]
).values.flatten()
grp.create_dataset(field, data=column)
# Memory heavy, but fast
else:
dataset = pd.read_csv(
file,
header=None,
comment='#',
names=fields,
sep=r'\s+',
dtype=np.float64,
usecols=fields
)
for field in fields:
grp.create_dataset(
field, data=dataset[field].values.flatten()
)
def hdf_to_dataframe(
hdf_file: str,
reduce: Optional[int]=None,
aggregator: Optional[Callable[[np.ndarray], float]]=None,
weight: bool=True,
reshape: bool=True,
grouper: str='ff',
weight_name: str='ww'
) -> pd.DataFrame:
"""
Read HDF file to dataframe and reduce number of datapoints.
Parameters
----------
hdf_file : Path to HDF file.
reduce : Skip this number of lines when reading the file.
aggregator : Function taking a column as input and returning a float.
weight : Whether to reweight the data using a weight column.
reshape : If true, use wide_to_long to add residue index.
grouper : Name of the index determining the simulation.
weight_name : Name of the weight column.
Returns
-------
df : Multiindexed dataframe with the type of datapoint as columns
and time, residue number and HDF top-level key as indices.
"""
if reduce is not None and aggregator is not None:
raise TypeError(
'You can not specify both a reduction and an aggregator!'
)
frames = []
with h5py.File(hdf_file, 'r') as store:
# Iterate through HDF groups
for key in store.keys():
# Reducing density
if reduce is not None:
raw = pd.DataFrame({
k: v[::reduce] for k, v in store[key].items()
})
# Renormalize weights
raw[weight_name] /= raw[weight_name].sum()
raw[grouper] = key
# Aggregate over time
else:
raw = {}
# Iterate through columns
for k, v in store[key].items():
# Time and weights will be lost anyway
if k in ['time', weight_name]:
continue
elif weight:
raw[k] = aggregator(
v[()] * store[key][weight_name][()]
)
else:
raw[k] = aggregator(v[()])
raw[grouper] = key
raw = pd.DataFrame(raw, index=[key])
frames.append(raw)
raw_frame = pd.concat(frames)
if reshape:
# Unique columns for wide_to_long
cols = list({v.rstrip('0123456789') for v in raw_frame.columns
if v not in ['time', grouper, weight_name]})
# Move residue number to index
index = 'time' if reduce is not None else grouper
df = pd.wide_to_long(raw_frame, cols, i=index, j='res_nr')
else:
df = raw_frame
# Remove superfluous columns
if reduce is not None:
df = df.set_index([grouper], append=True)
if 'time' in df:
del df['time']
else:
if grouper in df:
del df[grouper]
return df.sort_index(axis=1).sort_index()
def plumed_to_h5(
files: Union[str, List[str]],
hdf_file: str,
func: Union[List[Callable[[pd.DataFrame], pd.DataFrame]],
Callable[[pd.DataFrame], pd.DataFrame], None]=None,
chunksize: int=10000,
verbose: bool=True,
kwargs: Union[Mapping[str, Any],
Sequence[Mapping[str, Any]], None]=None
) -> None:
"""
Read PLUMED files and dump to pytables HDF5.
Parameters
----------
files : File(s) generated by PLUMED.
hdf_file : Path to HDF file.
func : Function(s) operating on dataframe chunks.
chunksize : Size of the chunks to be read by pd.read_csv()
verbose : Be more verbose.
kwargs : Arguments to be passed to func.
"""
# Check input
if not isinstance(files, list):
files = [files]
if not isinstance(func, list):
func = [func]
if not isinstance(kwargs, list):
kwargs = [kwargs]
if func[0] is not None and len(func) != len(files):
raise ValueError(
'You must supply the same number of callables as files!'
)
for j, file in enumerate(files):
# Read column names
fields = read_plumed_fields(file)
# Prepare iterator over chunks
chunker = pd.read_csv(
file,
header=None,
comment='#',
names=fields,
sep=r'\s+',
chunksize=chunksize,
dtype=np.float64
)
# Iterate over chunks
for i, chunk in enumerate(chunker):
# We make sure each file has a unique entry
if func[0] is not None:
chunk = func[j](chunk, **kwargs[j])
else:
chunk['file'] = j
chunk = chunk.sort_index(axis=1)
chunk.to_hdf(hdf_file, 'df', append=True)
if verbose:
print('Writing chunk {0}'.format(i))
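# ---------------------------------------------------------------------------
# Editorial usage sketch (illustrative, not part of the module). Assuming two
# hypothetical PLUMED output files 'COLVAR.0' and 'COLVAR.1' that contain a
# weight column named 'ww':
#
#   plumed_to_hdf(['COLVAR.0', 'COLVAR.1'], 'colvars.h5', keys=['run0', 'run1'])
#   df = hdf_to_dataframe('colvars.h5', reduce=10)
# ---------------------------------------------------------------------------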
|
mit
|
PROSIC/prosic-evaluation
|
scripts/plot-concordance.py
|
1
|
4454
|
from itertools import product
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import common
import numpy as np
import math
from matplotlib.lines import Line2D
from matplotlib.colors import to_rgba
class NotEnoughObservationsException(Exception):
pass
MIN_CALLS = 20
MAX_LEN = 1000
vartype = snakemake.wildcards.vartype
colors = common.get_colors(snakemake.config)
varlociraptor_calls_low = [pd.read_table(f) for f in snakemake.input.varlociraptor_calls_low]
varlociraptor_calls_high = [pd.read_table(f) for f in snakemake.input.varlociraptor_calls_high]
adhoc_calls = [pd.read_table(f) for f in snakemake.input.adhoc_calls]
def expected_count(af, effective_mutation_rate):
"""Calculate the expected number of somatic variants
greater than a given allele frequency given an effective mutation
rate, according to the model of Williams et al. Nature
Genetics 2016"""
return effective_mutation_rate * (1.0 / af - 1.0)
def expected_counts(afs, effective_mutation_rate):
return [expected_count(af, effective_mutation_rate) for af in afs]
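# (Editorial note, illustrative) worked example of the neutral-evolution model above:
# with an effective mutation rate of 10, the expected number of somatic variants at
# allele frequency >= 0.1 is 10 * (1/0.1 - 1) = 90, dropping to 10 at frequency 0.5.
assert abs(expected_count(0.1, 10.0) - 90.0) < 1e-6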
def calc_concordance(calls):
n = len(calls)
return (calls["concordance_count"] > 1).sum() / n
def plot_len_range(minlen, maxlen, yfunc=None, yscale=None, upper_bound=None):
handles_varlociraptor = []
handles_adhoc = []
for i, caller in enumerate(snakemake.params.callers):
def plot_calls(calls, label, color, style, calls_lower=None):
def get_xy(calls, caseafs=None):
svlen = calls.loc[:, calls.columns.str.startswith("SVLEN")].abs()
# at least one of the calls has a valid svlen
valid = ((svlen >= minlen) & (svlen <= maxlen)).sum(axis=1) >= 1
calls = calls[valid]
if caseafs is None:
caseafs = calls["max_case_af"].dropna().unique()
y = []
_caseafs = []
for caseaf in sorted(caseafs):
_calls = calls[calls["max_case_af"] >= caseaf]
if upper_bound is not None:
_calls = _calls[_calls["max_case_af"] <= caseaf + upper_bound]
if len(_calls) < MIN_CALLS:
continue
_caseafs.append(caseaf)
y.append(yfunc(_calls))
return _caseafs, y
x, y = get_xy(calls)
if not x:
raise NotEnoughObservationsException()
if calls_lower is not None:
_, y2 = get_xy(calls_lower, caseafs=x)
return plt.fill_between(x, y, y2, label=label, edgecolor=color, facecolor=to_rgba(color, alpha=0.2))
else:
if style != "-":
plt.plot(x, y, "-", color="white", alpha=0.8)
return plt.plot(x, y, style, label=label, color=color)[0]
color = colors[snakemake.params.callers[i]]
try:
handles_varlociraptor.append(
plot_calls(
varlociraptor_calls_high[i],
"varlociraptor+{}".format(caller),
color=color, style="-",
calls_lower=varlociraptor_calls_low[i]))
except NotEnoughObservationsException:
# skip plot
pass
try:
handles_adhoc.append(plot_calls(adhoc_calls[i], caller, color=color, style=":"))
except NotEnoughObservationsException:
# skip plot
pass
handles = handles_varlociraptor + handles_adhoc
sns.despine()
ax = plt.gca()
if yscale is not None:
ax.set_yscale(yscale)
return ax, handles
plt.figure(figsize=(10, 4))
plt.subplot(121)
plot_len_range(1, MAX_LEN, yfunc=calc_concordance)
plt.xlabel("$\geq$ tumor allele frequency")
plt.ylabel("concordance")
plt.subplot(122)
for effective_mutation_rate in 10 ** np.linspace(1, 5, 7):
afs = np.linspace(0.0, 1.0, 100, endpoint=False)
plt.semilogy(afs, expected_counts(afs, effective_mutation_rate), "-", color="grey", alpha=0.4)
ax, handles = plot_len_range(1, MAX_LEN, yfunc=lambda calls: len(calls), yscale="log")
plt.xlabel("$\geq$ tumor allele frequency")
plt.ylabel("# of calls")
ax.legend(handles=handles, loc="upper left", bbox_to_anchor=(1.0, 1.0))
plt.tight_layout()
plt.savefig(snakemake.output[0], bbox_inches="tight")
|
mit
|
douglask3/UKESM_LandSurface_plotting
|
libs/plot_TS.py
|
1
|
1606
|
import iris
import numpy as np
import cartopy.crs as ccrs
import iris.plot as iplt
import matplotlib.pyplot as plt
from pdb import set_trace as browser
def grid_area(cube):
if cube.coord('latitude').bounds is None:
cube.coord('latitude').guess_bounds()
cube.coord('longitude').guess_bounds()
return iris.analysis.cartography.area_weights(cube)
### Running mean/Moving average
def running_N_mean(l, N):
sum = 0
result = list( 0 for x in l)
for i in range( 0, N ):
sum = sum + l[i]
result[i] = sum / (i+1)
for i in range( N, len(l) ):
sum = sum - l[i-N] + l[i]
result[i] = sum / N
return result
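# (Editorial note, illustrative) e.g. running_N_mean([1.0, 2.0, 3.0, 4.0], 2)
# returns [1.0, 1.5, 2.5, 3.5]: the first N entries are cumulative means of the
# leading values; from index N onward a sliding window of length N is used.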
def cube_TS(cube, running_mean = False, mean = False):
cube.data = np.ma.masked_invalid(cube.data)
grid_areas = grid_area(cube)
collapseFun = iris.analysis.MEAN if mean else iris.analysis.SUM
cube = cube.collapsed(['latitude', 'longitude'], collapseFun, weights = grid_areas)
if (running_mean): cube.data = running_N_mean(cube.data, 12)
return cube
def plot_cube_TS(cubes, running_mean, mean, units):
cubes = [cube_TS(cube, running_mean, mean) for cube in cubes]
for cube in cubes: iplt.plot(cube, label = cube.name())
if units is None: units = [cubes[0].units if mean else '']
ncol = min(4 * int(len(cubes)**0.5), len(cubes))
plt.legend(loc = 'upper center', bbox_to_anchor = (0.5, -0.05),
fancybox = True, shadow = True, ncol = ncol)
plt.grid(True)
plt.axis('tight')
plt.gca().set_ylabel(units, fontsize=16)
|
gpl-3.0
|
henrykironde/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
254
|
7434
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
jia-kai/hearv
|
riesz/libriesz/analyze.py
|
1
|
13254
|
# -*- coding: utf-8 -*-
# $File: analyze.py
# $Date: Wed Jan 07 01:30:31 2015 +0800
# $Author: jiakai <jia.kai66@gmail.com>
from .config import floatX
from .utils import plot_val_with_fft, get_env_config
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta, abstractmethod
import logging
logger = logging.getLogger(__name__)
class Motion1DAnalyserBase(object):
"""analyze local 1D motion"""
__metaclass__ = ABCMeta
start_idx = 0
"""actual index of first frame in pyr_list"""
ref = None
"""reference frame"""
pyr_list = None
"""current active frame"""
def __init__(self, pyr_list):
self.pyr_list = pyr_list
assert len(pyr_list) >= 2 and \
all(i.img_shape == pyr_list[0].img_shape for i in pyr_list)
self.ref = self.pyr_list[0]
self.__cached_motion = [None] * len(pyr_list)
@property
def nr_level(self):
return self.pyr_list[0].nr_level
@property
def nr_frame(self):
return len(self.pyr_list) + self.start_idx
@abstractmethod
def _local_motion_map(self, frame_idx, level):
""":param frame_idx: the actual idx in self.pyr_list"""
pass
def __call__(self, frame_idx, level):
"""get local motion map for every pixel at specific level and frame
index
:param frame: int, frame index
:param level: int, level index
:return: motion, amp"""
frame_idx -= self.start_idx
assert frame_idx >= 0 and frame_idx < len(self.pyr_list)
if self.__cached_motion[frame_idx]:
d = self.__cached_motion[frame_idx]
else:
d = self.__cached_motion[frame_idx] = dict()
v = d.get(level)
if v is None:
v = self._local_motion_map(frame_idx, level)
d[level] = v
motion, amp = v
assert motion.shape == amp.shape and motion.ndim == 2
assert np.min(motion) < 0 and np.min(amp) > 0
return motion, amp
def add_frame(self, new_pyr):
"""move forward by adding a new pyramid"""
assert type(new_pyr) is type(self.pyr_list[0])
del self.pyr_list[0]
del self.__cached_motion[0]
self.pyr_list.append(new_pyr)
self.__cached_motion.append(None)
self.start_idx += 1
def next_pow2(n):
v = 1
while v < n:
v *= 2
return v
def find_optimal_fft_size(val):
while True:
p = 2
v0 = val
while val > 1:
while val % p == 0:
val /= p
p += 1
if p - 1 <= 5:
return v0
val = v0 + 1
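# (Editorial note, illustrative) find_optimal_fft_size returns the smallest integer
# >= val whose largest prime factor is at most 5 (an FFT-friendly "5-smooth" length),
# e.g. 97 -> 100 = 2**2 * 5**2, while 96 = 2**5 * 3 is already acceptable.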
class AvgSpectrum(object):
EPS = np.finfo(floatX).tiny
_nr_adj_frame = None
_sample_rate = None
_target_duration = None
_last_level_nr_sample = None
vert_group_size = int(get_env_config('VERT_GROUP_SIZE', 150))
"""size of vertical group"""
vert_group_exp_decay = int(get_env_config('VERT_GROUP_EXP_DECAY', 0))
_real_sample_rate = None
"""actual sample rate of last level in the pyramid"""
def __init__(self, nr_adj_frame, sample_rate, target_duration=None):
""":param nr_adj_frame: number of adjacent frames to be used
:param sample_rate: sample rate for columns in one frame
:param target_duration: target duration for each frame"""
self._nr_adj_frame = int(nr_adj_frame)
self._sample_rate = float(sample_rate)
self._target_duration = target_duration
assert self._nr_adj_frame > 0
def __call__(self, motion_ana, frame_idx):
# nr_sample in last level
ll_nr_sample = motion_ana(
frame_idx, motion_ana.nr_level - 1)[0].shape[1]
sample_rate = self._sample_rate / (2 ** (motion_ana.nr_level - 1))
if self._real_sample_rate is None:
self._real_sample_rate = sample_rate
assert self._real_sample_rate == sample_rate
if self._target_duration:
tgt_ll_nr_sample = int(float(self._target_duration) * sample_rate)
else:
tgt_ll_nr_sample = ll_nr_sample
assert tgt_ll_nr_sample >= ll_nr_sample
if self._last_level_nr_sample is None:
if False:
padded_width = next_pow2(tgt_ll_nr_sample * self._nr_adj_frame)
if padded_width / 2 >= ll_nr_sample * self._nr_adj_frame:
padded_width /= 2
if self._last_level_nr_sample is None:
self._last_level_nr_sample = padded_width
assert self._last_level_nr_sample == padded_width
else:
padded_width = find_optimal_fft_size(
tgt_ll_nr_sample * self._nr_adj_frame)
self._last_level_nr_sample = padded_width
nopadding_width = ll_nr_sample * self._nr_adj_frame
logger.info('padding ratio: {}/{}={}'.format(
padded_width, nopadding_width,
float(padded_width) / nopadding_width))
padded_width = self._last_level_nr_sample
padded_width *= 2 ** (motion_ana.nr_level - 1)
spec_sum = None
weight_sum = None
vg_orig = self.vert_group_size
for level in range(motion_ana.nr_level):
frames = [motion_ana(frame_idx + i, level)
for i in range(self._nr_adj_frame)]
for spec, weight in self._analyze_one_scale(frames, padded_width):
weight = float(weight)
spec *= weight
if spec_sum is None:
spec_sum = spec.copy()
weight_sum = weight
else:
spec_sum += spec
weight_sum += weight
padded_width /= 2
if self.vert_group_exp_decay:
self.vert_group_size /= 2
self.vert_group_size = vg_orig
amp = spec_sum / weight_sum
assert len(amp) == self._last_level_nr_sample / 2
freq = np.arange(len(amp), dtype=floatX)
freq *= self._real_sample_rate / (len(amp) * 2)
return amp, freq
def _analyze_one_scale(self, frames, nr_sample):
""":return: spectrum, weight"""
motion = [i[0] for i in frames]
amps = [np.square(i[1]) for i in frames]
signal = np.empty(shape=nr_sample, dtype=floatX)
window = None
for y in range(0, amps[0].shape[0], self.vert_group_size):
all_weight = []
signal.fill(0)
x0 = 0
for fidx in range(len(frames)):
cur_amps = amps[fidx][y:y+self.vert_group_size]
amps_sum = np.sum(cur_amps, axis=0)
assert amps_sum.ndim == 1
all_weight.append(np.sum(amps_sum) / cur_amps.size)
x1 = x0 + amps_sum.size
cur_motion = np.sum(motion[fidx][y:y+self.vert_group_size] *
cur_amps, axis=0) / (amps_sum + self.EPS)
signal[x0:x1] = cur_motion
x0 = x1
#plot_val_with_fft(cur_motion, self._sample_rate, show=False)
if window is None:
window = RawSpectrogram.make_window(x0)
signal[:x0] *= window
#plot_val_with_fft(signal, self._sample_rate)
cur_weight = np.mean(all_weight)
fft_amp = np.abs(np.fft.fft(signal)[:self._last_level_nr_sample/2])
yield fft_amp, cur_weight
class RawSpectrogram(object):
_window = None
_step = None
def __init__(self, length, step, win_type='hamming'):
step = int(step)
assert step > 0
self._step = step
self._window = self.make_window(length, win_type)
@classmethod
def make_window(cls, length, win_type='hamming'):
length = int(length)
if win_type == 'hamming':
a = 0.54
b = -0.46
else:
assert win_type == 'hanning'
a = 0.5
b = -0.5
x = np.arange(length, dtype=floatX) * 2 + 1
x *= np.pi / length
w = a + np.cos(x) * b
w /= np.sqrt(4 * a * a + 2 * b * b)
if length % 4 == 0:
# check for normalization
step = length / 4
for i in range(length):
p = i
while p >= step:
p -= step
s = 0
while p < length:
s += w[p] * w[p]
p += step
assert abs(s - 1) < 1e-5
else:
logger.warn(
'\n\n!!!length not multiple of 4 for scaled window, '
'do not use recon')
return w
def __call__(self, signal, nr_time=None):
"""return: 2D matrix indexed by (time, frequency)"""
assert signal.ndim == 1 and signal.size >= self._window.size
if nr_time is None:
nr_time = (signal.size - self._window.size) / self._step + 1
result = np.empty((nr_time, self._window.size), dtype=floatX)
pos = 0
for i in range(nr_time):
p = i * self._step
sub = signal[p:p+self._window.size]
result[i] = np.abs(np.fft.fft(sub * self._window))
return result
@property
def win_size(self):
return self._window.size
class AdjSTFT(object):
"""short-time fourier transform for adjacent frams"""
EPS = np.finfo(floatX).tiny
_sample_rate = None
_step_factor = None
"""step size relative to window length"""
_nr_spec_per_frame = None
"""number of time points for spectrogram for each frame"""
_get_spectrogram = None
"""list of :class:`RawSpectrogram` objects to calculate spectrogram for
each scale"""
_ll_nr_sample = None
"""number of samples in last level"""
_ll_sample_rate = None
"""sample rate in last level"""
vert_group_size = 6
"""size of vertical group"""
def __init__(self, sample_rate, target_duration):
""":param sample_rate: sample rate for level0
:param target_duration: duration for each frame"""
self._sample_rate = float(sample_rate)
self._step_factor = 0.25 / (target_duration * self._sample_rate)
def __call__(self, motion_ana, frame_idx):
if self._get_spectrogram is None:
self._get_spectrogram = []
self._step_factor *= motion_ana(frame_idx, 0)[0].shape[0]
self._nr_spec_per_frame = int(1 / self._step_factor)
logger.info('stretch={} nr_spec_per_frame={}'.format(
0.25 / self._step_factor, self._nr_spec_per_frame))
for level in range(motion_ana.nr_level):
win_size = motion_ana(frame_idx, level)[0].shape[1]
step = int(win_size * self._step_factor)
logger.info('level {}: spectrogram '
'win_size={} step={}'.format(level, win_size, step))
assert step * self._nr_spec_per_frame <= win_size
self._get_spectrogram.append(RawSpectrogram(win_size, step))
self._ll_nr_sample = self._get_spectrogram[-1].win_size
self._ll_sample_rate = self._sample_rate / (
2 ** (motion_ana.nr_level - 1))
logger.info('usable sample rate: {}'.format(self._ll_sample_rate))
spec_sum = None
weight_sum = None
for level in range(motion_ana.nr_level):
frames = [motion_ana(frame_idx + i, level) for i in range(2)]
for spec, weight in self._analyze_one_scale(
frames, self._get_spectrogram[level]):
weight = float(weight)
spec *= weight
if spec_sum is None:
spec_sum = spec.copy()
weight_sum = weight
else:
spec_sum += spec
weight_sum += weight
spec_sum /= weight_sum
N = self._ll_nr_sample / 2
assert spec_sum.shape[1] == N
freq = np.arange(N, dtype=floatX)
freq *= self._ll_sample_rate / (N * 2)
return spec_sum, freq
def _analyze_one_scale(self, frames, spectrogram):
assert len(frames) == 2
motion = [i[0] for i in frames]
amps = [np.square(i[1]) for i in frames]
signal = np.empty(shape=spectrogram.win_size*2, dtype=floatX)
for y in range(0, amps[0].shape[0], self.vert_group_size):
all_weight = []
signal.fill(0)
x0 = 0
for fidx in range(len(frames)):
cur_amps = amps[fidx][y:y+self.vert_group_size]
amps_sum = np.sum(cur_amps, axis=0)
assert amps_sum.ndim == 1
all_weight.append(np.sum(amps_sum) / cur_amps.size)
x1 = x0 + amps_sum.size
cur_motion = np.sum(motion[fidx][y:y+self.vert_group_size] *
cur_amps, axis=0) / (amps_sum + self.EPS)
signal[x0:x1] = cur_motion
x0 = x1
cur_weight = np.mean(all_weight)
cur_spec = spectrogram(signal, nr_time=self._nr_spec_per_frame)
yield cur_spec[:, :self._ll_nr_sample/2], cur_weight
|
unlicense
|
reuk/wayverb
|
scripts/python/iterative_tetrahedral.py
|
2
|
4819
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import sqrt
import operator
BASIC_CUBE = [(0, 0, 0), # 0
(0.5, 0, 0.5), # 1
(0.25, 0.25, 0.25), # 2
(0.75, 0.25, 0.75), # 3
(0, 0.5, 0.5), # 4
(0.5, 0.5, 0), # 5
(0.25, 0.75, 0.75), # 6
(0.75, 0.75, 0.25)] # 7
def get_neighbor_offset_table():
ret = [[((0, 0, 0), 2), ((-1, 0, -1), 3), ((-1, -1, 0), 6), ((0, -1, -1), 7)],
[((0, 0, 0), 2), ((0, 0, 0), 3), ((0, -1, 0), 6), ((0, -1, 0), 7)],
[((0, 0, 0), 0), ((0, 0, 0), 1), ((0, 0, 0), 4), ((0, 0, 0), 5)],
[((1, 0, 1), 0), ((0, 0, 0), 1), ((0, 0, 1), 4), ((1, 0, 0), 5)],
[((0, 0, 0), 2), ((0, 0, -1), 3), ((0, 0, 0), 6), ((0, 0, -1), 7)],
[((0, 0, 0), 2), ((-1, 0, 0), 3), ((-1, 0, 0), 6), ((0, 0, 0), 7)],
[((1, 1, 0), 0), ((0, 1, 0), 1), ((0, 0, 0), 4), ((1, 0, 0), 5)],
[((0, 1, 1), 0), ((0, 1, 0), 1), ((0, 0, 1), 4), ((0, 0, 0), 5)]]
return map(lambda j: map(lambda i: Locator(*i), j), ret)
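# Each (offset, idx) pair above describes one neighbour: `offset` is the relative
# position of the neighbouring unit cube and `idx` is the node's index within
# BASIC_CUBE inside that cube (the pairs are wrapped into Locator objects on return).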
def mul((x, y, z), d):
return (x * d, y * d, z * d)
def add(a, b):
return map(lambda (x, y): x + y, zip(a, b))
def node_cube(spacing):
return map(lambda i: mul(i, spacing), BASIC_CUBE)
def get_mesh((x, y, z), spacing):
c = []
for i in range(x):
xo = i * spacing
for j in range(y):
yo = j * spacing
for k in range(z):
zo = k * spacing
nodes = node_cube(spacing)
nodes = map(lambda i: add(i, (xo, yo, zo)), nodes)
c += nodes
return c
class Locator:
def __init__(self, pos, mod_ind):
self.pos = pos
self.mod_ind = mod_ind
class WaveguideMesh:
def __init__(self, dim, spacing):
self.mesh = get_mesh(dim, spacing)
self.dim = dim
self.offsets = get_neighbor_offset_table()
def get_index(self, locator):
i, j, k = self.dim
x, y, z = locator.pos
l = len(BASIC_CUBE)
return locator.mod_ind + x * l + y * i * l + z * i * j * l
def get_locator(self, index):
i, j, k = self.dim
mod_ind = index % len(BASIC_CUBE)
index -= mod_ind
index /= len(BASIC_CUBE)
x = index % i
index -= x
index /= i
y = index % j
index -= y
index /= j
z = index % k
index -= z
index /= k
return Locator((x, y, z), mod_ind)
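    # Illustrative invariant (not in the original file): get_index and
    # get_locator are inverses for every valid flat index, e.g.
    #   mesh = WaveguideMesh((2, 2, 2), 1)
    #   assert all(mesh.get_index(mesh.get_locator(i)) == i
    #              for i in range(len(mesh.mesh)))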
def locator_filter(self, c, relative):
x, y, z = self.dim
rlx, rly, rlz = add(c.pos, relative.pos)
return 0 <= rlx < x and 0 <= rly < y and 0 <= rlz < z
def get_absolute_neighbors(self, index):
locator = self.get_locator(index)
x, y, z = locator.pos
mod_ind = locator.mod_ind
relative = self.offsets[mod_ind]
ret = []
for i in relative:
summed = add(locator.pos, i.pos)
sx, sy, sz = summed
is_neighbor = (0 <= summed[0] < self.dim[0] and
0 <= summed[1] < self.dim[1] and
0 <= summed[2] < self.dim[2])
            ind = self.get_index(Locator(summed, i.mod_ind)) if is_neighbor else -1
ret.append(ind)
return ret
def concat(l):
return reduce(operator.add, l)
def main():
waveguide = WaveguideMesh((2, 2, 2), 1)
x, y, z = map(lambda i: np.array(i), zip(*waveguide.mesh))
max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]).max() / 2.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
fig = plt.figure()
for plot in range(8):
ax = fig.add_subplot(331 + plot, projection='3d', aspect='equal')
pos = waveguide.get_index(Locator((0, 0, 0), plot))
n = waveguide.get_absolute_neighbors(pos)
n = filter(lambda i: i >= 0, n)
p = []
p += [waveguide.mesh[i] for i in n]
p += [waveguide.mesh[pos]]
print plot, p
ax.scatter(*zip(*p))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax = fig.add_subplot(339, projection='3d', aspect='equal')
ax.scatter(*zip(*waveguide.mesh))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
if __name__ == "__main__":
main()
|
gpl-2.0
|
dongjoon-hyun/spark
|
python/run-tests.py
|
15
|
13614
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
# Here expects skipped test output from unittest when verbosity level is
# 2 (or --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
def _check_coverage(python_exec):
# Make sure if coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
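    # Scheduling note (illustrative): PriorityQueue hands out the smallest
    # priority value first, so the long-running suites queued with priority 0
    # above are started before the priority-100 goals, which shortens the
    # overall wall-clock time of the run.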
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
|
apache-2.0
|
dimkarakostas/rupture
|
etc/theory/experiments/rupture_performance/plot.py
|
4
|
2506
|
import matplotlib.pyplot as plt
from collections import OrderedDict
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 1
seconds = OrderedDict([
('aes128cbc', [0, 11, 8, 5, 6, 6, 11]), # 47
('aes128gcm', [0, 6, 8, 6, 5, 6, 7]), # 38
('aes256cbc', [0, 7, 7, 5, 6, 6, 9]), # 40
('aes256gcm', [0, 10, 8, 6, 8, 9, 7]) # 48
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_1.png'
'''
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 2
seconds = OrderedDict([
('aes128cbc', [0, 10, 11, 4, 6, 10, 17]), # 58
('aes128gcm', [0, 7, 6, 5, 6, 7, 6]), # 37
('aes256cbc', [0, 8, 7, 20, 7, 14, 14]), # 70
('aes256gcm', [0, 8, 6, 6, 5, 7, 7]), # 39
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_2.png'
'''
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 3
seconds = OrderedDict([
('aes128cbc', [0, 9, 7, 5, 8, 7, 8]), # 44
('aes128gcm', [0, 7, 9, 5, 6, 6, 7]), # 40
('aes256cbc', [0, 15, 7, 8, 8, 8, 14]), # 60
('aes256gcm', [0, 10, 6, 6, 5, 7, 6]), # 40
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_3.png'
'''
# Serial adaptive (keeping only the last 2 known chars) on ruptureit
seconds = OrderedDict([
('aes128cbc', [0, 18, 16, 17, 17, 18, 17, 18, 18, 18, 17, 16, 20, 18, 33, 37, 17, 16, 16, 15, 16, 17, 19, 51]), # 465
('aes128gcm', [0, 19, 20, 19, 18, 17, 20, 19, 17, 16, 19, 16, 17, 17, 17, 19, 17, 17, 19, 18, 22, 17, 17, 20]), # 417
('aes256cbc', [0, 22, 18, 21, 19, 18, 37, 18, 19, 20, 19, 17, 19, 36, 18, 16, 18, 19, 18, 34, 18, 18, 18, 19]), # 479
('aes256gcm', [0, 18, 18, 21, 18, 21, 20, 18, 20, 22, 20, 18, 19, 16, 17, 18, 15, 15, 18, 17, 17, 16, 16, 18]) # 416
])
title = 'Rupture serial against block ciphers'
filename = 'rupture_serial_performance.png'
letters = [i for i in range(len(seconds['aes128cbc']))]
aggregated_seconds = OrderedDict()
for ciph, timings in seconds.items():
aggregated_seconds[ciph] = []
prev = 0
for t in timings:
aggregated_seconds[ciph].append(prev+t)
prev += t
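# Equivalent sketch (illustrative only): the running totals built above are a
# plain cumulative sum, so the inner loop could also be written as, e.g.,
#   import numpy as np
#   aggregated_seconds[ciph] = list(np.cumsum(timings))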
font = {
'size': 12
}
plt.rc('font', **font)
plt.title(title, y=1.01)
plt.ylabel('Decrypted characters')
plt.xlabel('Time (sec)')
for i in aggregated_seconds:
plt.plot(aggregated_seconds[i], letters)
plt.legend([i for i in aggregated_seconds])
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.savefig(filename)
|
mit
|
expectocode/telegram-analysis
|
venn_userlist.py
|
2
|
3908
|
#!/usr/bin/env python3
"""
A program to plot the overlap of chats
"""
import argparse
from json import loads
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn3
def main():
"""
main function
"""
parser = argparse.ArgumentParser(
description="Visualise the overlap between 2 or 3 chats. Note that for 3 chats, perfect geometry may be impossible.")
parser.add_argument(
'-f','--file',
help='paths to the json userlist',
required = True)
parser.add_argument(
'-c','--chat_names',
help="Names (or part of names) of the chats you're interested in, case insensitive",
nargs='+',
required = True)
parser.add_argument('-o', '--output-folder',
help='the folder to save the graph image in')
args = parser.parse_args()
filepath = args.file
names = args.chat_names
savefolder = args.output_folder
full_names = []
userlists = [[] for name in names]
j = loads(open(filepath,'r').read())
list_of_chats = [j[x] for x in j]
titles = [x['title'] for x in list_of_chats]
#this code works but doesn't sort j by chat title length, which is important due to the user title search thing
#for index,name in enumerate(names):
# found_chat = False
# for chat_id in j:
# if name in j[chat_id]['title'] and len(list(j[chat_id]['users'])) > 0:
# #because some of the memberlist things come to zero
# #print(j[chat_id]['users'])
# full_names.append(j[chat_id]['title'])
# userlists[index].extend( [user['id'] for user in j[chat_id]['users']] )
# found_chat = True
#
# if not found_chat:
# print("Could not find result for", name)
# exit()
#magic
# [x[1]['title'] for x in sorted(list(j.items()), key=lambda a: len(a[1]['title']))]
sorted_j = [ x for x in sorted(list(j.items()), key=lambda a: len(a[1]['title'])) if(len(x[1]['users']) > 0) ]
#this makes the JSON input into a list of tuples (chat_id, info_dict) and also removes empty chats
#the importance of sorting is so that search strings are first tested on the smaller titles
#eg searching 'GNU/Linux' should yield 'GNU/Linux' before 'GNU/Linux Chat' (real example)
for index,name in enumerate(names):
found_chat = False
for chat in sorted_j:
#lowercase because case sensitivity annoyed me
if name.lower() in chat[1]['title'].lower() and len(chat[1]['users']) > 0 and (not found_chat):
#because some of the memberlist things come to zero
#print(j[chat_id]['users'])
full_names.append(chat[1]['title'])
userlists[index].extend( [user['id'] for user in chat[1]['users']] )
found_chat = True
if not found_chat:
print("Could not find result for", name)
exit()
if len(userlists) == 2:
venn2([set(userlists[0]), set(userlists[1])],(full_names[0], full_names[1]))
elif len(userlists) == 3:
venn3([set(userlists[0]), set(userlists[1]), set(userlists[2])],(full_names[0], full_names[1], full_names[2]))
#print(users)
if savefolder is not None:
#if there is a given folder to save the figure in, save it there
names_string = '_'.join(full_names)
if len(names_string) > 200:
#file name likely to be so long as to cause issues
figname = input(
"This diagram is going to have a very long file name. Please enter a custom name(no need to add an extension): ")
else:
figname = 'User overlap in {}'.format(names_string).replace('/','_')
plt.savefig("{}/{}.png".format(savefolder, figname))
else:
plt.show()
if __name__ == "__main__":
main()
|
mit
|
namvo88/Thesis-Quadrotor-Code
|
sw/tools/calibration/calibrate_gyro.py
|
87
|
4686
|
#! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from numpy import linspace, polyval
from scipy import stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
    # overlay fitted value
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
|
gpl-2.0
|
tonyqtian/quora-simi
|
test/scoreAvg.py
|
1
|
1458
|
'''
Created on Jun 6, 2017
@author: tonyq
'''
from pandas.io.parsers import read_csv
from pandas.core.frame import DataFrame
from time import strftime
timestr = strftime("%Y%m%d-%H%M%S-")
base = '../output/candi/'
file_list = '20170604-165432-XGB_leaky.clean.csv,' + \
'20170605-112337-XGB_leaky.csv,' + \
'20170605-231748-0.1622_lstm_225_120_0.40_0.26.csv,' + \
'20170606-181044-XGB_leaky.csv,' + \
'20170606-153627-0.1647_lstm_176_147_0.22_0.23.csv,' + \
'20170606-183958-0.1693_lstm_191_110_0.23_0.17.csv,' + \
'20170606-193222-0.1651_lstm_231_100_0.36_0.32.csv,' + \
'20170606-203300-0.1608_lstm_241_122_0.49_0.30.csv,' + \
'20170606-220331-0.1654_lstm_264_107_0.57_0.35.csv,' + \
'20170606-162818-0.1763_lstm_221_135_0.30_0.25.csv'
test_ids = []
is_duplicate = []
for filename in file_list.split(','):
print('Processing file ', filename.strip())
df = read_csv(base + filename.strip())
if len(test_ids) == 0:
test_ids = df.test_id
if len(is_duplicate) == 0:
is_duplicate = df.is_duplicate
else:
is_duplicate += df.is_duplicate
is_duplicate /= len(file_list.split(','))
print('Dumping file...')
submission = DataFrame({'test_id':test_ids, 'is_duplicate':is_duplicate})
submission.to_csv(base + timestr + 'ensemble_submit.csv', index=False)
print('Finished')
|
mit
|
UWSEDS-aut17/uwseds-group-city-fynders
|
cityfynders/UI_setup.py
|
1
|
7055
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
def layout_setup(pairs):
"""
This function returns a layout of the user interface
pairs: pairs is a two_dimensional list. The first is the columns from pandas data frame,
the second is the relative name for each column in the dropdown choices
"""
INFORMATION = """
    City information is always an interesting topic in people’s daily lives. Many websites
    publish their own rankings based on their own preferences. However, it is hard to say
    which city is the best choice for everyone. We created a tool that lets people choose the
    factors that matter to them instead of a ranking based on information they don’t care about.
    We divided the factors into four main parts: human related (Population, Crime, College,
    Hospital, etc.), economy (Income, Percent Unemployment, etc.), natural (Green
    Score, Air, Water, etc.) and tertiary (Restaurant, Museum, Library, etc.). We
    combined our data from many different sources, such as existing ranking websites and
    data collected by various departments. We provide some buttons below to help you make your own choice.
"""
lay = html.Div([
html.Div([
html.Br(),
html.Br(),
html.Center(html.H1("Find Your Dream City to Live in the US",
style={ 'color': 'lavender', 'fontFamily': 'Helvetica', 'fontSize': 50}),
),
html.Center(html.P("Powerd by City Fynders",
style={'color': 'black','fontFamily':'Helvetica', 'fontSize': 20}),
),
html.Br(),
html.Br()],
style={'class':'l-r','backgroundColor': 'FireBrick', 'margin': 0,'padding': 0}),
html.Div([
html.Br(),
html.Br(),
html.Div(html.P(INFORMATION,
style={'margin-left': '10%','float':'left','width': '500px','height': '500px','color':'black','fontFamily':'Helvetica', 'fontSize': 20 }),
),
html.Center(html.Img(src="http://travelquaz.com/wp-content/uploads/2015/09/photo-travel-holidays-to-the-usa-america-visit-usa-images.jpg"),
style={'margin-right': '10%','float':'right'})
]),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Div([
html.Center(html.Button('Total Rank', id='Total',
style={'width': 200, 'color': 'white', 'backgroundColor': 'FireBrick', 'height': 70,'fontSize': 15})),
]),
html.Br(),
html.Br(),
html.Center(html.Div([
html.Button('Human Related Rank', id='Hrank',
style={'width': 200, 'color': 'black', 'backgroundColor': 'lightblue', 'height': 70,'fontSize': 15,'marginRight':100}),
html.Button('Natural Rank', id='Nrank',
style={'width': 200, 'color': 'black', 'backgroundColor': 'lightblue', 'height': 70,'fontSize': 15,'marginRight':100}),
html.Button('Economy Rank', id='Erank',
style={'width': 200, 'color': 'black', 'backgroundColor': 'lightblue', 'height': 70,'fontSize': 15,'marginRight':100}),
            html.Button('Tertiary Industry Rank', id='Trank',
style={'width': 200, 'color': 'black', 'backgroundColor': 'lightblue', 'height': 70,'fontSize': 15}),
])),
html.Br(),
html.Br(),
html.Center(html.Div([
html.Center(html.H1("Factor Correlation Graph",
style={ 'color': 'black', 'fontFamily': 'Helvetica', 'fontSize': 20}),
),
html.Center(html.Img(src="https://user-images.githubusercontent.com/32367015/33920542-67cd1d80-df73-11e7-9dc5-48cdc7a5c81e.png"),
)
])),
html.Br(),
html.Br(),
html.Center(html.Div([
html.P("Now choose factors you care about",
style={'color': 'black','fontFamily':'Helvetica', 'fontSize': 20}),
])),
# First important factor
html.Div([
html.P('Important No.1'),
dcc.Dropdown(
id='First-care',
options=[ {'label': pairs[1][i],'value': pairs[0][i]} for i in range(19)],
)
],
style={'width': '20%', 'fontFamily':'Helvetica','display': 'inline-block'}),
# Second important factor
html.Div([
html.P('Important No.2'),
dcc.Dropdown(
id='Second-care',
options=[{'label': pairs[1][i],'value': pairs[0][i]} for i in range(19)],
)
],
style={'width': '20%','fontFamily':'Helvetica', 'display': 'inline-block'}),
# Third important factor
html.Div([
html.P('Important No.3'),
dcc.Dropdown(
id='Third-care',
options=[{'label': pairs[1][i],'value': pairs[0][i]} for i in range(19)],
)
],
style={'width': '20%','fontFamily':'Helvetica', 'display': 'inline-block'}),
# Fourth important factor
html.Div([
html.P('Important No.4'),
dcc.Dropdown(
id='Fourth-care',
options=[{'label': pairs[1][i],'value': pairs[0][i]} for i in range(19)],
)
],
style={'width': '20%','fontFamily':'Helvetica', 'display': 'inline-block'}),
# Fifth important factor
html.Div([
html.P('Important No.5'),
dcc.Dropdown(
id='Fifth-care',
options=[{'label': pairs[1][i],'value': pairs[0][i]} for i in range(19)],
)
],
style={'width': '20%', 'fontFamily':'Helvetica','display': 'inline-block'}),
html.Br(),
html.Br(),
html.Br(),
html.Center(html.Button('Go Searching!', id='Search',
style={'width': 200, 'color': 'white', 'backgroundColor': 'FireBrick', 'height': 70,'fontSize': 20})),
html.Div(dcc.Graph(id='User-graphic')),
html.Div(dcc.Graph(id='Total-graphic')),
html.Div(dcc.Graph(id='Human-graphic')),
html.Div(dcc.Graph(id='Natural-graphic')),
html.Div(dcc.Graph(id='Economic-graphic')),
html.Div(dcc.Graph(id='Tertiary-graphic'))
])
return lay
|
mit
|
JudoWill/ResearchNotebooks
|
HIVSystemsBio.py
|
1
|
1296
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import *
import os, os.path
import csv
os.chdir('/home/will/HIVSystemsBio/')
# <codecell>
cocaine_genes = read_csv('CocaineGeneList.csv')
hiv_genes = read_csv('HIVGeneList.csv', sep = '\t')
biomart_conv = read_csv('mart_export.txt', sep = '\t')
# <codecell>
hiv_genes = merge(hiv_genes, biomart_conv,
left_on = 'Gene identifier',
right_on = 'Ensembl Gene ID',
how = 'inner')
# <codecell>
cocaine_genes
# <codecell>
printed = set()
with open('out_gene_list.tsv', 'w') as handle:
writer = csv.writer(handle, delimiter = '\t')
for gene, direc in hiv_genes[['EntrezGene ID', 'Expression']].dropna().values:
geneid = int(gene)
group = 'HIV-' + direc
tup = (geneid, group)
if tup not in printed:
writer.writerow(tup)
printed.add(tup)
for gene in cocaine_genes['ID'].values:
geneid = int(gene)
writer.writerow((geneid, 'Cocaine'))
# <codecell>
both_genes = merge(hiv_genes, cocaine_genes,
left_on = 'Gene identifier',
right_on = 'ID',
how = 'inner')
# <codecell>
both_genes
# <codecell>
|
mit
|
PatrickOReilly/scikit-learn
|
benchmarks/bench_sparsify.py
|
323
|
3372
|
"""
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
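# Note (assumption based on the module docstring): when this script is run under
# kernprof as described above, the two benchmark functions are expected to carry
# line_profiler's @profile decorator, e.g.
#   @profile
#   def benchmark_dense_predict():
#       ...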
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
|
bsd-3-clause
|
sarahgrogan/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
cython-testbed/pandas
|
pandas/tests/arrays/categorical/test_operators.py
|
4
|
11785
|
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
import pandas.util.testing as tm
from pandas import Categorical, Series, DataFrame, date_range
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
def test_categories_none_comparisons(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = Categorical(
["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(
["b", "b", "b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
pytest.raises(TypeError, f)
cat_rev_base2 = Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
pytest.raises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
assert not (cat > cat).any()
def f():
cat > cat_unorderd
pytest.raises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > s)
pytest.raises(TypeError, lambda: cat_rev > s)
pytest.raises(TypeError, lambda: s < cat)
pytest.raises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > a)
pytest.raises(TypeError, lambda: cat_rev > a)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps(object):
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat,
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
pytest.raises(TypeError, lambda: cat < 4)
pytest.raises(TypeError, lambda: cat > 4)
pytest.raises(TypeError, lambda: 4 < cat)
pytest.raises(TypeError, lambda: 4 > cat)
tm.assert_numpy_array_equal(cat == 4,
np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4,
np.array([True, True, True]))
@pytest.mark.parametrize('data,reverse,base', [
(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(
Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(
Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
pytest.raises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
pytest.raises(TypeError, lambda: cat > s)
pytest.raises(TypeError, lambda: cat_rev > s)
pytest.raises(TypeError, lambda: cat > a)
pytest.raises(TypeError, lambda: cat_rev > a)
pytest.raises(TypeError, lambda: s < cat)
pytest.raises(TypeError, lambda: s < cat_rev)
pytest.raises(TypeError, lambda: a < cat)
pytest.raises(TypeError, lambda: a < cat_rev)
@pytest.mark.parametrize('ctor', [
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
])
def test_unordered_different_order_equal(self, ctor):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 == c2).all()
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
with tm.assert_raises_regex(TypeError,
"Categoricals can only be compared"):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=['a', 'b'])
c2 = Categorical([], categories=['a'])
msg = "Categories are different lengths"
with tm.assert_raises_regex(TypeError, msg):
c1 == c2
def test_compare_unordered_different_order(self):
# https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
# 349290078
a = pd.Categorical(['a'], categories=['a', 'b'])
b = pd.Categorical(['b'], categories=['b', 'a'])
assert not a.equals(b)
def test_numeric_like_ops(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
# numeric ops should not succeed
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
pytest.raises(TypeError,
lambda: getattr(df, op)(df))
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = df['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
pytest.raises(TypeError,
lambda: getattr(s, op)(numeric_only=False))
# mad technically works because it takes always the numeric data
# numpy ops
s = Series(Categorical([1, 2, 3, 4]))
pytest.raises(TypeError, lambda: np.sum(s))
# numeric ops on a Series
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
pytest.raises(TypeError, lambda: getattr(s, op)(2))
# invalid ufunc
pytest.raises(TypeError, lambda: np.log(s))
def test_contains(self):
# GH21508
c = pd.Categorical(list('aabbca'), categories=list('cab'))
assert 'b' in c
assert 'z' not in c
assert np.nan not in c
with pytest.raises(TypeError):
assert [1] in c
# assert codes NOT in index
assert 0 not in c
assert 1 not in c
c = pd.Categorical(list('aabbca') + [np.nan], categories=list('cab'))
assert np.nan in c
|
bsd-3-clause
|
madgik/exareme
|
Exareme-Docker/src/mip-algorithms/CART/step/3/local.py
|
1
|
2268
|
from __future__ import division
from __future__ import print_function
import sys
from os import path
from argparse import ArgumentParser
import pandas as pd
import numpy as np
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/utils/')
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/CART/')
from algorithm_utils import StateData
from cart_lib import Cart_Glob2Loc_TD, CartIter3_Loc2Glob_TD, cart_step_3_local
def main(args):
    sys.argv = args
# Parse arguments
parser = ArgumentParser()
parser.add_argument('-cur_state_pkl', required=True,
help='Path to the pickle file holding the current state.')
parser.add_argument('-prev_state_pkl', required=True,
help='Path to the pickle file holding the previous state.')
parser.add_argument('-global_step_db', required=True,
help='Path to db holding global step results.')
args, unknown = parser.parse_known_args()
fname_cur_state = path.abspath(args.cur_state_pkl)
fname_prev_state = path.abspath(args.prev_state_pkl)
global_db = path.abspath(args.global_step_db)
# Load local state
local_state = StateData.load(fname_prev_state).data
# Load global node output
globalTree, activePaths = Cart_Glob2Loc_TD.load(global_db).get_data()
# Run algorithm local iteration step
activePaths = cart_step_3_local(local_state['dataFrame'], local_state['args_X'], local_state['args_Y'], local_state['CategoricalVariables'], activePaths)
## Finished
local_state = StateData( args_X = local_state['args_X'],
args_Y = local_state['args_Y'],
CategoricalVariables = local_state['CategoricalVariables'],
dataFrame = local_state['dataFrame'],
globalTree = globalTree,
activePaths = activePaths)
local_out = CartIter3_Loc2Glob_TD(activePaths)
# Save local state
local_state.save(fname=fname_cur_state)
# Return
local_out.transfer()
if __name__ == '__main__':
    main(sys.argv)
|
mit
|
mhue/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
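# Sanity sketch (illustrative, not part of the original example): every atom
# returned by ricker_matrix is normalised to unit l2 norm, e.g.
#   D_check = ricker_matrix(width=100, resolution=1024, n_components=32)
#   assert np.allclose(np.sum(D_check ** 2, axis=1), 1.0)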
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
P1R/cinves
|
TrabajoFinal/PortadoraVariableModuladaFija/AM/TvsFrqRate-AM.py
|
1
|
1245
|
import numpy as np
import matplotlib.pyplot as plt
'''
The base frequency is 50 Hz and the rate frequency varies from 30 to 200;
this example uses a 50% pawn in AM.
For this experiment the values are:
measurement time: 2 minutes
generator voltage: 0.3 volts
tube under test: copper, 350 cm
WITHOUT STREAMING.
'''
Freq=np.array([20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180]);
DeltaTemp=np.array([0.4,0.7,1,1.1,0.4,0.7,0.8,0.8,0.2,0.7,1.2,1.8,3.7,0.9,0.1,0.1,1.8])
TempT1=np.array([18.8,18.6,18.7,18.8,18.4,18.9,19.1,20.2,19.9,19.8,20.2,20.1,19.1,20.4,21.1,21.6,22.5])
TempT2=np.array([19.2,19.3,19.7,19.9,18.0,18.2,18.3,19.4,20.1,20.5,21.4,21.9,22.8,21.3,21.0,20.9,20.7])
TempAmb=np.array([18.9,19.0,19.2,19.5,18.4,18.6,18.6,19.0,19.8,20.3,20.9,20.5,20.6,20.8,20.8,20.9,21.0])
plt.xlabel('Rate')
plt.ylabel('Temperature')
plt.title('Temperature vs Rate in AM with 50% pawn')
#for i in range(len(Freq)):
# plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
plt.axis([0, 200, 0, 30])
plt.plot(Freq,TempT1,'bo',Freq,TempT1,'k')
plt.plot(Freq,TempT2,'r^',Freq,TempT2,'r')
plt.plot(Freq,DeltaTemp,'ko',Freq,DeltaTemp,'k')
plt.plot(Freq,TempAmb,'yo',Freq,TempAmb,'y')
plt.grid(True)
plt.show()
|
apache-2.0
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_except.py
|
15
|
1043
|
#!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(unittest.TestCase):
def test_raise_on_find_unsupported_value(self):
import datetime
self.assertRaises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalidvalue(self):
self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
|
artistic-2.0
|
legacysurvey/pipeline
|
py/legacyanalysis/check-fracflux.py
|
1
|
3198
|
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from tractor import *
from tractor.galaxy import *
from astrometry.util.util import *
from astrometry.util.fits import *
from astrometry.util.plotutils import *
from legacypipe.runbrick import _compute_source_metrics
if __name__ == '__main__':
ps = PlotSequence('frac')
W,H = 50,50
cx,cy = W/2., H/2.
gala = ExpGalaxy(RaDecPos(0., 0.), Flux(200.), EllipseESoft(1., 0., 0.5))
galb = ExpGalaxy(RaDecPos(0., 0.), Flux(100.), EllipseESoft(1., 0., 0.5))
halfsize = 25
gpsf = NCircularGaussianPSF([2.], [1.])
gpsf.radius = halfsize
psfimg = gpsf.getPointSourcePatch(0., 0., radius=15)
print 'PSF image size', psfimg.shape
pixpsf = PixelizedPSF(psfimg.patch)
data=np.zeros((H,W), np.float32)
wcs = ConstantFitsWcs(Tan(0., 0., W/2., H/2.,
1e-4/3600., 0.262/3600., 0.263/3600., 1e-5/3600.,
float(W), float(H)))
img = Image(data=data, invvar=np.ones_like(data), psf=gpsf, wcs=wcs)
img.band = 'r'
srcs = [gala, galb]
tims = [img]
tr = Tractor(tims, srcs)
B = fits_table()
B.sources = srcs
print 'B.sources:', B.sources
bands = 'r'
## Mask out half the image
img.inverr[H/2:,:] = 0.
allfracs = []
allfracins = []
allfracmasked = []
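    # Added note (my reading of the script): the sweep below moves galaxy A in RA past the
    # fixed galaxy B and records the per-source blending metrics from
    # _compute_source_metrics at each offset -- roughly, fracflux ~ profile-weighted
    # contamination from other sources, fracin ~ fraction of the model flux landing inside
    # the image, and fracmasked ~ profile-weighted fraction of zero-inverse-variance
    # (masked) pixels, here the blanked lower half of the image.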
for ra in np.linspace(-0.001, 0.001, 7):
gala.pos.ra = ra
M = _compute_source_metrics(srcs, tims, bands, tr)
for k,v in M.items():
B.set(k, v)
allfracs.append(B.fracflux)
allfracins.append(B.fracin)
allfracmasked.append(B.fracmasked)
allfracs = np.hstack(allfracs).T
allfracins = np.hstack(allfracins).T
allfracmasked = np.hstack(allfracmasked).T
print 'Allfracs:', allfracs.shape
plt.clf()
plt.plot(allfracs[:,0], 'b-')
plt.plot(allfracs[:,1], 'r-')
plt.plot(allfracins[:,0], 'b--')
plt.plot(allfracins[:,1], 'r--')
plt.plot(allfracmasked[:,0], 'b-.')
plt.plot(allfracmasked[:,1], 'r-.')
ps.savefig()
# Put one galaxy on the edge of the image.
galb.pos = wcs.pixelToPosition(W/2., 0.)
print 'Gal b:', wcs.positionToPixel(galb.pos)
allfracs = []
allfracins = []
allfracmasked = []
for ra in np.linspace(-0.002, 0.002, 15):
gala.pos.ra = ra
print 'Gal a:', wcs.positionToPixel(gala.pos)
M = _compute_source_metrics(srcs, tims, bands, tr)
for k,v in M.items():
B.set(k, v)
allfracs.append(B.fracflux)
allfracins.append(B.fracin)
allfracmasked.append(B.fracmasked)
#sys.exit(0)
# mod = tr.getModelImage(0)
# plt.clf()
# dimshow(mod)
# ps.savefig()
allfracs = np.hstack(allfracs).T
allfracins = np.hstack(allfracins).T
allfracmasked = np.hstack(allfracmasked).T
plt.clf()
plt.plot(allfracs[:,0], 'b-')
plt.plot(allfracs[:,1], 'r-')
plt.plot(allfracins[:,0], 'b--')
plt.plot(allfracins[:,1], 'r--')
plt.plot(allfracmasked[:,0], 'b-.')
plt.plot(allfracmasked[:,1], 'r-.')
ps.savefig()
|
gpl-2.0
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/Graphs/GraphData.py
|
2
|
17470
|
""" GraphData encapsulates input data for the DIRAC Graphs plots
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import six
import time
import datetime
import numpy
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pretty_float
DEBUG = 0
def get_key_type(keys):
""" A utility function to guess the type of the plot keys
"""
min_time_stamp = 1000000000
max_time_stamp = 1900000000
time_type = True
num_type = True
string_type = True
key_type = 'unknown'
for key in keys:
if time_type:
try:
time_data = to_timestamp(key)
if time_data < min_time_stamp or time_data > max_time_stamp:
time_type = False
except ValueError:
time_type = False
if num_type:
try:
num_data = float(key)
except Exception:
num_type = False
if not isinstance(key, six.string_types):
string_type = False
# Take the most restrictive type
if string_type:
key_type = "string"
if num_type:
key_type = "numeric"
if time_type:
key_type = "time"
return key_type
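# Illustrative behaviour (added examples, assuming to_timestamp parses ordinary date
# strings and raises ValueError otherwise):
#   get_key_type(['1.5', '2', '10'])                 -> 'numeric'
#   get_key_type(['2020-01-01', '2020-01-02 12:00']) -> 'time'
#   get_key_type(['LCG.CERN.cern', 'LCG.PIC.es'])    -> 'string'
# The checks run cumulatively and the most restrictive surviving type wins
# (string < numeric < time).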
class GraphData(object):
def __init__(self, data={}):
self.truncated = 0
self.all_keys = []
self.labels = []
self.label_values = []
self.subplots = {}
self.plotdata = None
self.data = dict(data)
self.key_type = 'string'
self.initialize()
def isEmpty(self):
""" Check if there is no data inserted
"""
return not self.plotdata and not self.subplots
def setData(self, data):
""" Add data to the GraphData object
"""
self.data = dict(data)
self.initialize()
def initialize(self, key_type=None):
keys = list(self.data)
    if not keys:
      print("GraphData Error: empty data")
      return
    start = time.time()
if isinstance(self.data[keys[0]], dict):
for key in self.data:
self.subplots[key] = PlotData(self.data[key], key_type=key_type)
else:
self.plotdata = PlotData(self.data, key_type=key_type)
if DEBUG:
print("Time: plot data", time.time() - start, len(self.subplots))
if self.plotdata:
self.all_keys = self.plotdata.getKeys()
else:
tmpset = set()
for sub in self.subplots.values():
for key in sub.getKeys():
tmpset.add(key)
self.all_keys = list(tmpset)
if key_type:
self.key_type = key_type
else:
self.key_type = get_key_type(self.all_keys)
self.sortKeys()
self.makeNumKeys()
self.sortLabels()
def expandKeys(self):
if not self.plotdata:
for sub in self.subplots:
self.subplots[sub].expandKeys(self.all_keys)
def isSimplePlot(self):
return self.plotdata is not None
def sortLabels(self, sort_type='max_value', reverse_order=False):
""" Sort labels with a specified method:
alpha - alphabetic order
max_value - by max value of the subplot
sum - by the sum of values of the subplot
last_value - by the last value in the subplot
avg_nozeros - by an average that excludes all zero values
"""
if self.plotdata:
if self.key_type == "string":
if sort_type in ['max_value', 'sum']:
self.labels = self.plotdata.sortKeys('weight')
else:
self.labels = self.plotdata.sortKeys()
if reverse_order:
self.labels.reverse()
self.label_values = [self.plotdata.parsed_data[l] for l in self.labels]
else:
if sort_type == 'max_value':
pairs = list(zip(list(self.subplots), self.subplots.values()))
reverse = not reverse_order
pairs.sort(key=lambda x: x[1].max_value, reverse=reverse)
self.labels = [x[0] for x in pairs]
self.label_values = [x[1].max_value for x in pairs]
elif sort_type == 'last_value':
pairs = list(zip(list(self.subplots), self.subplots.values()))
reverse = not reverse_order
pairs.sort(key=lambda x: x[1].last_value, reverse=reverse)
self.labels = [x[0] for x in pairs]
self.label_values = [x[1].last_value for x in pairs]
elif sort_type == 'sum':
pairs = []
for key in self.subplots:
pairs.append((key, self.subplots[key].sum_value))
reverse = not reverse_order
pairs.sort(key=lambda x: x[1], reverse=reverse)
self.labels = [x[0] for x in pairs]
self.label_values = [x[1] for x in pairs]
elif sort_type == 'alpha':
self.labels = list(self.subplots)
self.labels.sort()
if reverse_order:
self.labels.reverse()
self.label_values = [self.subplots[x].sum_value for x in self.labels]
elif sort_type == 'avg_nozeros':
pairs = list(zip(list(self.subplots), self.subplots.values()))
reverse = not reverse_order
pairs.sort(key=lambda x: x[1].avg_nozeros, reverse=reverse)
self.labels = [x[0] for x in pairs]
self.label_values = [x[1].avg_nozeros for x in pairs]
else:
self.labels = list(self.subplots)
if reverse_order:
self.labels.reverse()
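  # Illustrative call (added; the variable name is made up): for a multi-label instance,
  #   gd.sortLabels(sort_type='sum')
  # leaves gd.labels ordered by decreasing per-subplot totals with the sums in
  # gd.label_values, while sort_type='alpha' sorts ascending by label name instead.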
def sortKeys(self):
""" Sort the graph keys in a natural order
"""
if self.plotdata:
self.plotdata.sortKeys()
self.all_keys = self.plotdata.getKeys()
else:
self.all_keys.sort()
self.min_key = min(self.all_keys)
self.max_key = max(self.all_keys)
def makeNumKeys(self):
""" Make numerical representation of the graph keys suitable for plotting
"""
self.all_num_keys = []
if self.key_type == "string":
self.all_string_map = {}
next = 0
for key in self.all_keys:
self.all_string_map[key] = next
self.all_num_keys.append(next)
next += 1
elif self.key_type == "time":
self.all_num_keys = [date2num(datetime.datetime.fromtimestamp(to_timestamp(key))) for key in self.all_keys]
elif self.key_type == "numeric":
self.all_num_keys = [float(key) for key in self.all_keys]
self.min_num_key = min(self.all_num_keys)
self.max_num_key = max(self.all_num_keys)
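  # Added note: string keys are mapped to consecutive integers 0..N-1 (the mapping is
  # kept in self.all_string_map and exposed via getStringMap), time keys become
  # matplotlib date numbers via date2num, and numeric keys are used as-is.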
def makeCumulativeGraph(self):
""" Prepare data for the cumulative graph
"""
self.expandKeys()
if self.plotdata:
self.plotdata.makeCumulativePlot()
if self.truncated:
self.otherPlot.makeCumulativePlot()
if self.subplots:
for label in self.subplots:
self.subplots[label].makeCumulativePlot()
self.sortLabels(sort_type='last_value')
def getLabels(self):
""" Get the graph labels together with the numeric values used for the label
sorting
"""
labels = []
if self.plotdata:
if self.key_type != 'string':
labels = [('NoLabels', 0.)]
else:
labels = list(zip(self.labels, self.label_values))
elif self.truncated:
tlabels = self.labels[:self.truncated]
tvalues = self.label_values[:self.truncated]
labels = list(zip(tlabels, tvalues))
labels.append(('Others', sum(self.label_values[self.truncated:])))
else:
labels = list(zip(self.labels, self.label_values))
return labels
def getStringMap(self):
""" Get string to number mapping for numeric type keys
"""
return self.all_string_map
def getNumberOfKeys(self):
return len(self.all_keys)
def getNumberOfLabels(self):
if self.truncated:
return self.truncated + 1
else:
return len(self.labels)
def getPlotNumData(self, label=None, zipFlag=True):
""" Get the plot data in a numeric form
"""
if self.plotdata:
if zipFlag:
return zip(self.plotdata.getNumKeys(), self.plotdata.getValues(), self.plotdata.getPlotErrors())
else:
return self.plotdata.getValues()
elif label is not None:
if label == "Others":
return self.otherPlot.getPlotDataForNumKeys(self.all_num_keys)
else:
return self.subplots[label].getPlotDataForNumKeys(self.all_num_keys)
else:
# Get the sum of all the subplots
self.expandKeys()
arrays = []
for label in self.subplots:
arrays.append(numpy.array([x[1] for x in self.subplots[label].getPlotDataForNumKeys(self.all_num_keys, True)]))
sum_array = sum(arrays)
if zipFlag:
return zip(self.all_num_keys, list(sum_array))
else:
return sum_array
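  # Added summary of the branches above (as I read them): a simple plot yields
  # (num_key, value, error) triples, or just the value list when zipFlag is False; a
  # named label yields that subplot's triples, with the special label "Others" served
  # from the accumulated otherPlot; with no label and several subplots the values are
  # summed element-wise and returned as (num_key, sum) pairs or as the bare sum array.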
def truncateLabels(self, limit=10):
""" Truncate the number of labels to the limit, leave the most important
ones, accumulate the rest in the 'Others' label
"""
if self.plotdata:
return
nLabels = len(self.labels)
if nLabels <= limit:
return
self.truncated = limit
new_labels = self.labels[:limit]
new_labels.append('Others')
other_data = {}
for key in self.all_keys:
other_data[key] = 0.
for label in self.labels:
if label not in new_labels:
for key in self.all_keys:
if key in self.subplots[label].parsed_data:
other_data[key] += self.subplots[label].parsed_data[key]
self.otherPlot = PlotData(other_data)
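  # Illustrative effect (added): with 15 labels, truncateLabels(limit=10) keeps the 10
  # labels ranked first by sortLabels(), sums the remaining subplots key-by-key into
  # self.otherPlot, and getLabels()/getNumberOfLabels() then report 11 entries
  # (10 + 'Others').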
def getStats(self):
""" Get statistics of the graph data
"""
numData = self.getPlotNumData(zipFlag=False)
if not len(numData): # pylint: disable=len-as-condition
return 0, 0, 0, 0
numData = numpy.array(numData)
min_value = numData.min()
max_value = numData.max()
average = float(numData.sum()) / len(numData)
current = numData[-1]
return min_value, max_value, average, current
def getStatString(self, unit=None):
""" Get a string summarizing the graph data statistics
"""
min_value, max_value, average, current = self.getStats()
tmpList = []
unitString = ''
if unit:
unitString = str(unit)
if max_value:
try:
s = "Max: " + pretty_float(max_value) + " " + unitString
tmpList.append(s.strip())
except Exception:
pass
if min_value:
try:
s = "Min: " + pretty_float(min_value) + " " + unitString
tmpList.append(s.strip())
except Exception:
pass
if average:
try:
s = "Average: " + pretty_float(average) + " " + unitString
tmpList.append(s.strip())
except Exception:
pass
if current:
try:
s = "Current: " + pretty_float(current) + " " + unitString
tmpList.append(s.strip())
except Exception:
pass
resultString = ', '.join(tmpList)
return resultString
class PlotData(object):
""" PlotData class is a container for a one dimensional plot data
"""
def __init__(self, data, single=True, key_type=None):
self.key_type = "unknown"
if not data:
print("PlotData Error: empty data")
return
# Original data
self.data = dict(data)
# Working copy of the parsed data
self.parsed_data = {}
self.parsed_errors = {}
# Keys and values as synchronized lists
self.keys = []
self.num_keys = []
self.values = []
self.errors = []
self.sorted_keys = []
# Do initial data parsing
self.parseData(key_type)
if single:
self.initialize()
def initialize(self):
if self.key_type == "string":
self.keys = self.sortKeys('weight')
else:
self.keys = self.sortKeys()
self.values = [self.parsed_data.get(k, 0.0) for k in self.keys]
self.errors = [self.parsed_errors.get(k, 0.0) for k in self.keys]
values_to_sum = [self.parsed_data.get(k, 0.0) for k in self.keys if k != '']
self.real_values = []
for k in self.keys:
if self.parsed_data[k] is not None:
self.real_values.append(self.parsed_data[k])
self.values_sum = float(sum(self.real_values))
# Prepare numerical representation of keys for plotting
self.num_keys = []
if self.key_type == "string":
self.string_map = {}
next = 0
for key in self.keys:
self.string_map[key] = next
self.num_keys.append(next)
next += 1
elif self.key_type == "time":
self.num_keys = [date2num(datetime.datetime.fromtimestamp(to_timestamp(key))) for key in self.keys]
elif self.key_type == "numeric":
self.num_keys = [float(key) for key in self.keys]
self.min_value = float(min(self.real_values))
self.max_value = float(max(self.real_values))
self.min_key = self.keys[0]
self.max_key = self.keys[-1]
self.sum_value = float(sum(self.real_values))
self.last_value = float(self.real_values[-1])
count = len(list(filter(lambda a: a != 0, self.real_values)))
if count != 0:
self.avg_nozeros = self.sum_value / float(count)
else:
self.avg_nozeros = 0
def expandKeys(self, all_keys):
""" Fill zero values into the missing keys
"""
for k in all_keys:
if k not in self.parsed_data:
self.parsed_data[k] = 0.
self.sorted_keys = []
self.keys = list(self.parsed_data)
self.initialize()
def sortKeys(self, sort_type='alpha'):
""" Sort keys according to the specified method :
alpha - sort in alphabetic order
weight - sort in the order of values
"""
if self.sorted_keys:
return self.sorted_keys
if sort_type == 'weight':
pairs = list(zip(list(self.parsed_data), self.parsed_data.values()))
pairs.sort(key=lambda x: x[1], reverse=True)
self.sorted_keys = [x[0] for x in pairs]
elif sort_type == 'alpha':
self.sorted_keys = list(self.keys)
self.sorted_keys.sort()
else:
print("Unknown sorting type:", sort_type)
return self.sorted_keys
def __data_size(self, item):
"""
Determine a numerical size for the data; this is used to
sort the keys of the graph.
If the item is a tuple, take the absolute value of the first entry.
Otherwise, attempt to take the absolute value of that item. If that
fails, just return -1.
"""
if isinstance(item, tuple):
return abs(item[0])
try:
return abs(item)
except TypeError:
return - 1
def parseKey(self, key):
"""
Parse the name of the pivot; this is the identity function.
"""
if self.key_type == "time":
return to_timestamp(key)
else:
return key
def parseDatum(self, data):
"""
Parse the specific data value; this is the identity.
"""
if isinstance(data, six.string_types) and "::" in data:
datum, error = data.split("::")
elif isinstance(data, tuple):
datum, error = data
else:
error = 0.
datum = data
try:
resultD = float(datum)
except Exception:
resultD = None
try:
resultE = float(error)
except Exception:
resultE = None
return (resultD, resultE)
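  # Added examples of the datum formats accepted above:
  #   parseDatum('3.5::0.2') -> (3.5, 0.2)    "value::error" string
  #   parseDatum((3.5, 0.2)) -> (3.5, 0.2)    (value, error) tuple
  #   parseDatum('3.5')      -> (3.5, 0.0)    bare value, error defaults to 0.
  #   parseDatum('n/a')      -> (None, 0.0)   unparseable value becomes None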
def parseData(self, key_type=None):
"""
Parse all the data values passed to the graph. For this super class,
basically does nothing except loop through all the data. A sub-class
should override the parseDatum and parse_pivot functions rather than
this one.
"""
if key_type:
self.key_type = key_type
else:
self.key_type = get_key_type(list(self.data))
new_parsed_data = {}
new_passed_errors = {}
for key, data in self.data.items():
new_key = self.parseKey(key)
data, error = self.parseDatum(data)
# if data != None:
new_parsed_data[new_key] = data
new_passed_errors[new_key] = error
self.parsed_data = new_parsed_data
self.parsed_errors = new_passed_errors
self.keys = list(self.parsed_data)
def makeCumulativePlot(self):
if not self.sorted_keys:
self.sortKeys()
cum_values = []
if self.values[0] is None:
cum_values.append(0.)
else:
cum_values.append(self.values[0])
for i in range(1, len(self.values)):
if self.values[i] is None:
cum_values.append(cum_values[i - 1])
else:
cum_values.append(cum_values[i - 1] + self.values[i])
self.values = cum_values
self.last_value = float(self.values[-1])
def getPlotData(self):
return self.parsed_data
def getPlotErrors(self):
return self.parsed_errors
def getPlotNumData(self):
return zip(self.num_keys, self.values, self.errors)
def getPlotDataForKeys(self, keys):
result_pairs = []
for key in keys:
if key in self.parsed_data:
        result_pairs.append((key, self.parsed_data[key], self.parsed_errors[key]))
      else:
        result_pairs.append((key, None, 0.))
return result_pairs
def getPlotDataForNumKeys(self, num_keys, zeroes=False):
result_pairs = []
for num_key in num_keys:
try:
ind = self.num_keys.index(num_key)
if self.values[ind] is None and zeroes:
result_pairs.append((self.num_keys[ind], 0., 0.))
else:
result_pairs.append((self.num_keys[ind], self.values[ind], self.errors[ind]))
except ValueError:
if zeroes:
result_pairs.append((num_key, 0., 0.))
else:
result_pairs.append((num_key, None, 0.))
return result_pairs
def getKeys(self):
return self.keys
def getNumKeys(self):
return self.num_keys
def getValues(self):
return self.values
def getMaxValue(self):
return max(self.values)
def getMinValue(self):
return min(self.values)
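# ---------------------------------------------------------------------------
# Illustrative usage sketch, added for documentation only; it is not part of the
# original DIRAC module, and the labels, keys and expected output are made up.
if __name__ == "__main__":
    demo = {'site_A': {'a': 1.0, 'b': 2.0},
            'site_B': {'a': 3.0, 'b': 4.0}}
    gd = GraphData(demo)        # nested dict -> one PlotData subplot per label
    gd.makeCumulativeGraph()    # running sums per label; labels re-sorted by last value
    print(gd.getLabels())       # expected: [('site_B', 7.0), ('site_A', 3.0)]
    print(gd.getStatString())   # "Max/Min/Average/Current" summary of the summed values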
|
gpl-3.0
|