repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
CINPLA/expipe-dev | phy-contrib/examples/custom_view.py | 1 | 1208 | """Custom view plugin.
This plugin adds an interactive matplotlib figure showing the ISI of the
first selected cluster.
To activate the plugin, copy this file to `~/.phy/plugins/` and add this line
to your `~/.phy/phy_config.py`:
```python
c.KwikGUI.plugins = ['CustomView']
```
"""
from phy import IPlugin
import numpy as np
import matplotlib.pyplot as plt
class CustomView(IPlugin):
    """phy plugin adding an interactive matplotlib ISI view.

    The figure shows a histogram of inter-spike intervals (ISI) of the
    first selected cluster and is refreshed at every selection change.
    """

    def attach_to_controller(self, c):
        # Create the figure when initializing the GUI.  The same
        # figure/axes pair is reused for every subsequent selection.
        f, ax = plt.subplots()

        @c.connect
        def on_create_gui(gui):
            # Called when the GUI is created.
            # We add the matplotlib figure to the GUI.
            gui.add_view(f, name='ISI')

        # We connect this function to the "select" event triggered
        # by the controller at every cluster selection change.
        @c.connect
        def on_select(clusters, **kwargs):
            # We clear the figure.
            ax.clear()
            # We compute the ISI.
            # NOTE(review): assumes `spikes_per_cluster` returns spike times
            # in ascending order so np.diff yields ISIs — confirm upstream.
            spikes = c.spikes_per_cluster(clusters[0])
            ax.hist(np.diff(spikes), bins=50)
            # We update the figure.
            f.canvas.draw()
| gpl-3.0 |
dave-the-scientist/brugia_project | import_directionality.py | 1 | 4409 | import pandas
from cobra import Model, Metabolite, Reaction
from read_excel import read_excel, write_excel
# Input/output spreadsheet file names.
elegans_file = 'iCEL1273.xlsx'
model_in_file = 'model_b_mal.xlsx'
model_out_file = 'model_b_mal_2-wip.xlsx'
# Arrow tokens used in iCEL1273's machine-readable reaction strings.
reversible_arrow = '<==>'
irreversible_arrow = ('-->', '<--')
# Flux bounds applied when (un)constraining a reaction's direction.
l_bound, u_bound = -1000, 1000
# Load the working model and the C. elegans reference spreadsheet.
old_m = read_excel(model_in_file)
pio = pandas.io.excel.ExcelFile(elegans_file)
rxn_frame = pandas.read_excel(elegans_file, 'Reactions')
def use_directionalities(old_m, disagreements):
    """Apply the C. elegans directionality to each disagreeing reaction.

    Reversible reactions are opened to (l_bound, u_bound).  Irreversible
    ones are oriented so their products match the C. elegans products
    (metabolite ids translated M*/E* -> C*, an optional proton C00080
    ignored).  Entries that cannot be resolved automatically are returned
    for manual curation.
    """
    unresolved = []
    proton = set(['C00080'])
    for entry in disagreements:
        r_id, cel_dir, m_dir, cel_products = entry
        resolved = False
        if cel_dir == 'reversible':
            # Simply open the reaction in both directions.
            old_m.reactions.get_by_id(r_id).bounds = (l_bound, u_bound)
            resolved = True
        elif cel_dir == 'irreversible' and ',' not in r_id:
            # Combined reactions (comma-joined ids) are handled manually.
            # Translate the C. elegans product ids into this model's
            # compartment naming before comparing.
            cel_prods = set()
            for raw in cel_products:
                met = raw.replace('M', 'C').replace('E', 'C')
                if ' ' in met:
                    met = met.partition(' ')[2]
                cel_prods.add(met)
            rxn = old_m.reactions.get_by_id(r_id)
            product_ids = set(m.id for m in rxn.products)
            reactant_ids = set(m.id for m in rxn.reactants)
            if cel_prods == product_ids or cel_prods == product_ids | proton:
                # Same orientation as the reference: forward only.
                rxn.bounds = (0, u_bound)
                resolved = True
            elif cel_prods == reactant_ids or cel_prods == reactant_ids | proton:
                # Written backwards relative to the reference: reverse only.
                rxn.bounds = (l_bound, 0)
                resolved = True
        if not resolved:
            unresolved.append((r_id, cel_dir, m_dir, cel_products))
    return unresolved
# --- Compare iCEL1273 reaction directionalities against the current model ---
processed = set()
disagreements, rvsb, irrvsb, broken = [], 0, 0, 0
for i in range(len(rxn_frame)):
    row = rxn_frame.ix[i]  # .ix matches the pandas-0.x era this script targets
    rxn_str = row['Machine readable']
    r_ids = row['KEGG']
    # Skip rows with a blank cell (pandas reads blanks as NaN floats).
    # BUGFIX: the original tested `r_ids` twice; the first test is meant to
    # guard `rxn_str`, which is used as a string below.
    if not isinstance(rxn_str, unicode) or not isinstance(r_ids, unicode):
        continue
    if r_ids in processed:
        continue
    else:
        processed.add(r_ids)
    r_ids = r_ids.split(';')
    if len(rxn_str) == 0 or len(r_ids) == 0:
        continue
    # Parse the reaction arrow to get the reference direction and, for
    # irreversible reactions, the set of product metabolite ids.
    products = set()
    if reversible_arrow in rxn_str:
        cel_direction = 'reversible'
    elif irreversible_arrow[0] in rxn_str:
        cel_direction = 'irreversible'
        products = set(str(p.strip()) for p in rxn_str.partition(irreversible_arrow[0])[2].split('+'))
    elif irreversible_arrow[1] in rxn_str:
        cel_direction = 'irreversible'
        products = set(str(p.strip()) for p in rxn_str.partition(irreversible_arrow[1])[0].split('+'))
    else:
        print('\nError: could not parse %s' % rxn_str)
        # BUGFIX: without this `continue`, the row fell through with
        # `cel_direction` unset (or stale from a previous iteration).
        continue
    # Classify each matching model reaction by its current flux bounds.
    old_directions = []
    for r_id in r_ids:
        if r_id in old_m.reactions:
            rxn = old_m.reactions.get_by_id(r_id)
            if rxn.lower_bound < 0:
                if rxn.upper_bound <= 0:
                    old_directions.append('irreversible')
                else:
                    old_directions.append('reversible')
            else:
                if rxn.upper_bound > 0:
                    old_directions.append('irreversible')
                else:
                    old_directions.append('blocked')
        else:
            old_directions.append('missing')
    agreed = False
    if len(set(old_directions)) == 1 and old_directions[0] == 'missing':
        continue  # No matching reactions found in old_m
    elif 'blocked' in old_directions or 'missing' in old_directions:
        broken += 1  # Part of the reaction set exists in old_m, but part is missing or blocked.
    elif cel_direction == 'reversible':
        if 'irreversible' not in old_directions:
            agreed = True
        else:
            rvsb += 1
    else:  # Should be irreversible
        if 'irreversible' in old_directions:
            agreed = True
        else:
            irrvsb += 1
    if not agreed:
        disagreements.append((', '.join(r_ids), cel_direction, ', '.join(old_directions), products))
# Stable two-key sort: by reaction id, then by reference direction.
disagreements.sort(key=lambda d: d[0])
disagreements.sort(key=lambda d: d[1])
print('\n%i should have been reversible, %i irreversible, and %i were broken' % (rvsb, irrvsb, broken))
unresolved = use_directionalities(old_m, disagreements)
print('\n'.join('%s should be %s, but is: %s' % (d[:3]) for d in unresolved))
print(len(unresolved))
write_excel(old_m, model_out_file)
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/neighbors/approximate.py | 40 | 22369 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
# Hashes are stored as big-endian unsigned 32-bit integers, so a hash is
# MAX_HASH_SIZE == 32 bits long.
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X

    Most significant bits are considered as the prefix.
    """
    # Vectorized binary search over prefix length: for each query,
    # [lo, hi) brackets the candidate lengths still to examine and `res`
    # holds the longest length known to match.
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)
    # First probe at full hash length; exact-match queries finish here.
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size
    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        # A non-empty [left_idx, right_idx) means the prefix of `mid` bits
        # matched: record it and keep searching longer prefixes; otherwise
        # search shorter ones.
        found = right_idx > left_idx
        mid_found = mid[found]
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        hi[kept[~found]] = mid[~found]
        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Turn a transformed real-valued array into a hash"""

    @staticmethod
    def _to_hash(projected):
        # Sign bits are packed 8-per-byte, then the byte stream is viewed
        # as big-endian uint32 hashes, hence the multiple-of-8 requirement
        # (32 in practice, to fill whole uint32 words).
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # XXX: perhaps non-copying operation better
        out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
        return out.reshape(projected.shape[0], -1)

    def fit_transform(self, X, y=None):
        # Fit the underlying projection, then hash the projected data.
        self.fit(X)
        return self.transform(X)

    def transform(self, X, y=None):
        # Project via the sibling base class, then compress signs to hashes.
        return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""

    def __init__(self,
                 n_components=8,
                 random_state=None):
        # n_components is the hash size in bits (LSHForest passes
        # MAX_HASH_SIZE).
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components,
            random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
    """Performs approximate nearest neighbor search using LSH forest.

    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    method for vanilla approximate nearest neighbor search methods.
    LSH forest data structure has been implemented using sorted
    arrays and binary search and 32 bit fixed-length hashes.
    Random projection is used as the hash family which approximates
    cosine distance.

    The cosine distance is defined as ``1 - cosine_similarity``: the lowest
    value is 0 (identical point) but it is bounded above by 2 for the farthest
    points. Its value does not depend on the norm of the vector points but
    only on their relative angles.

    Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.

    Parameters
    ----------
    n_estimators : int (default = 10)
        Number of trees in the LSH Forest.

    min_hash_match : int (default = 4)
        lowest hash length to be searched when candidate selection is
        performed for nearest neighbors.

    n_candidates : int (default = 10)
        Minimum number of candidates evaluated per estimator, assuming enough
        items meet the `min_hash_match` constraint.

    n_neighbors : int (default = 5)
        Number of neighbors to be returned from query function when
        it is not provided to the :meth:`kneighbors` method.

    radius : float, optinal (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth`radius_neighbors` queries.

    radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranges from 0 to 1. Radius neighbors will be searched until
        the ratio between total neighbors within the radius and the total
        candidates becomes less than this value unless it is terminated by
        hash length reaching `min_hash_match`.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    hash_functions_ : list of GaussianRandomProjectionHash objects
        Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in GaussianRandomProjectionHash object and can be obtained
        from ``components_`` attribute.

    trees_ : array, shape (n_estimators, n_samples)
        Each tree (corresponding to a hash function) contains an array of
        sorted hashed values. The array representation may change in future
        versions.

    original_indices_ : array, shape (n_estimators, n_samples)
        Original indices of sorted hashed values in the fitted index.

    References
    ----------
    .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
           Indexes for Similarity Search", WWW '05 Proceedings of the
           14th international conference on World Wide Web, 651-660,
           2005.

    Examples
    --------
    >>> from sklearn.neighbors import LSHForest

    >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
    >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
    >>> lshf = LSHForest(random_state=42)
    >>> lshf.fit(X_train)  # doctest: +NORMALIZE_WHITESPACE
    LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
              n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
              random_state=42)
    >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
    >>> distances  # doctest: +ELLIPSIS
    array([[ 0.069...,  0.149...],
           [ 0.229...,  0.481...],
           [ 0.004...,  0.014...]])
    >>> indices
    array([[1, 2],
           [2, 0],
           [4, 0]])
    """

    def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
                 n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
                 random_state=None):
        # Store constructor parameters unmodified (sklearn convention:
        # validation happens in fit, not here).
        self.n_estimators = n_estimators
        self.radius = radius
        self.random_state = random_state
        self.n_candidates = n_candidates
        self.n_neighbors = n_neighbors
        self.min_hash_match = min_hash_match
        self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
    def _generate_masks(self):
        """Creates left and right masks for all hash lengths."""
        tri_size = MAX_HASH_SIZE + 1
        # Called once on fitting, output is independent of hashes
        # Row h of the (packed) left mask keeps the h most significant bits;
        # the reversed triangle gives the complementary right mask that sets
        # the remaining low bits.
        left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
        right_mask = left_mask[::-1, ::-1]
        self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
        self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
    def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
        """Performs the Synchronous ascending phase.

        Returns an array of candidates, their distance ranks and
        distances.
        """
        index_size = self._fit_X.shape[0]
        # Number of candidates considered including duplicates
        # XXX: not sure whether this is being calculated correctly wrt
        # duplicates from different iterations through a single tree
        n_candidates = 0
        candidate_set = set()
        min_candidates = self.n_candidates * self.n_estimators
        # Ascend: shorten the matched prefix one bit at a time, gathering
        # matches from every tree, until enough candidates are collected or
        # the minimum hash length is reached.
        while (max_depth > self.min_hash_match and
               (n_candidates < min_candidates or
                len(candidate_set) < n_neighbors)):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                n_candidates += stop - start
                candidate_set.update(
                    self.original_indices_[i][start:stop].tolist())
            max_depth -= 1
        candidates = np.fromiter(candidate_set, count=len(candidate_set),
                                 dtype=np.intp)
        # For insufficient candidates, candidates are filled.
        # Candidates are filled from unselected indices uniformly.
        if candidates.shape[0] < n_neighbors:
            warnings.warn(
                "Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, self.min_hash_match))
            remaining = np.setdiff1d(np.arange(0, index_size), candidates)
            to_fill = n_neighbors - candidates.shape[0]
            candidates = np.concatenate((candidates, remaining[:to_fill]))
        # Rank all collected candidates by true cosine distance and keep
        # the closest n_neighbors.
        ranks, distances = self._compute_distances(query,
                                                   candidates.astype(int))
        return (candidates[ranks[:n_neighbors]],
                distances[:n_neighbors])
    def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
        """Finds radius neighbors from the candidates obtained.

        Their distances from query are smaller than radius.
        Returns radius neighbors and distances.
        """
        ratio_within_radius = 1
        threshold = 1 - self.radius_cutoff_ratio
        total_candidates = np.array([], dtype=int)
        total_neighbors = np.array([], dtype=int)
        total_distances = np.array([], dtype=float)
        # Ascend while a sufficient fraction of collected candidates still
        # falls inside the radius (stop once new levels add mostly misses).
        while (max_depth > self.min_hash_match and
               ratio_within_radius > threshold):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            candidates = []
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                candidates.extend(
                    self.original_indices_[i][start:stop].tolist())
            # Only evaluate candidates not already seen at a deeper level.
            candidates = np.setdiff1d(candidates, total_candidates)
            total_candidates = np.append(total_candidates, candidates)
            ranks, distances = self._compute_distances(query, candidates)
            # Keep the sorted-distance prefix within `radius` and merge it
            # into the running (sorted) result arrays.
            m = np.searchsorted(distances, radius, side='right')
            positions = np.searchsorted(total_distances, distances[:m])
            total_neighbors = np.insert(total_neighbors, positions,
                                        candidates[ranks[:m]])
            total_distances = np.insert(total_distances, positions,
                                        distances[:m])
            ratio_within_radius = (total_neighbors.shape[0] /
                                   float(total_candidates.shape[0]))
            max_depth = max_depth - 1
        return total_neighbors, total_distances
    def fit(self, X, y=None):
        """Fit the LSH forest on the data.

        This creates binary hashes of input data points by getting the
        dot product of input points and hash_function then
        transforming the projection into a binary string array based
        on the sign (positive/negative) of the projection.
        A sorted array of binary hashes is created.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        self : object
            Returns self.
        """
        self._fit_X = check_array(X, accept_sparse='csr')
        # Creates a g(p,x) for each tree
        self.hash_functions_ = []
        self.trees_ = []
        self.original_indices_ = []
        rng = check_random_state(self.random_state)
        int_max = np.iinfo(np.int32).max
        for i in range(self.n_estimators):
            # This is g(p,x) for a particular tree.
            # Builds a single tree. Hashing is done on an array of data points.
            # `GaussianRandomProjection` is used for hashing.
            # `n_components=hash size and n_features=n_dim.
            hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                                 rng.randint(0, int_max))
            hashes = hasher.fit_transform(self._fit_X)[:, 0]
            # Sort hashes; original_indices_ maps sorted positions back to
            # the rows of _fit_X.
            original_index = np.argsort(hashes)
            bin_hashes = hashes[original_index]
            self.original_indices_.append(original_index)
            self.trees_.append(bin_hashes)
            self.hash_functions_.append(hasher)
        self._generate_masks()
        return self
    def _query(self, X):
        """Performs descending phase to find maximum depth."""
        # Calculate hashes of shape (n_samples, n_estimators, [hash_size])
        bin_queries = np.asarray([hasher.transform(X)[:, 0]
                                  for hasher in self.hash_functions_])
        bin_queries = np.rollaxis(bin_queries, 1)
        # descend phase
        # For each tree, find the longest prefix each query shares with the
        # stored hashes; the per-query maximum over trees is the starting
        # depth for the ascending phase.
        depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
                                             self._left_mask, self._right_mask)
                  for tree, tree_queries in zip(self.trees_,
                                                np.rollaxis(bin_queries, 1))]
        return bin_queries, np.max(depths, axis=0)
    def kneighbors(self, X, n_neighbors=None, return_distance=True):
        """Returns n_neighbors of approximate nearest neighbors.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.

        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at the initialization.

        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.

        Returns
        -------
        dist : array, shape (n_samples, n_neighbors)
            Array representing the cosine distances to each point,
            only present if return_distance=True.

        ind : array, shape (n_samples, n_neighbors)
            Indices of the approximate nearest points in the population
            matrix.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            # Query one row at a time; X[[i]] keeps a 2-d shape.
            neighs, dists = self._get_candidates(X[[i]], max_depth[i],
                                                 bin_queries[i],
                                                 n_neighbors)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            return np.array(distances), np.array(neighbors)
        else:
            return np.array(neighbors)
    def radius_neighbors(self, X, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.

        Return the indices and distances of some points from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.

        The result points are *not* necessarily sorted by distance to their
        query point.

        LSH Forest being an approximate method, some true neighbors from the
        indexed dataset might be missing from the results.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.

        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).

        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.

        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Each element is an array representing the cosine distances
            to some points found within ``radius`` of the respective query.
            Only present if ``return_distance=True``.

        ind : array, shape (n_samples,) of arrays
            Each element is an array of indices for neighbors within ``radius``
            of the respective query.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if radius is None:
            radius = self.radius
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                       bin_queries[i], radius)
            neighbors.append(neighs)
            distances.append(dists)
        # Result lengths vary per query, so wrap them in object arrays
        # instead of stacking into a 2-d array.
        if return_distance:
            return _array_of_arrays(distances), _array_of_arrays(neighbors)
        else:
            return _array_of_arrays(neighbors)
    def partial_fit(self, X, y=None):
        """
        Inserts new data into the already fitted LSH Forest.

        Cost is proportional to new total size, so additions
        should be batched.

        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data point to be inserted into the LSH Forest.
        """
        X = check_array(X, accept_sparse='csr')
        if not hasattr(self, 'hash_functions_'):
            # Not fitted yet: fall back to a plain fit on the new data.
            return self.fit(X)
        if X.shape[1] != self._fit_X.shape[1]:
            raise ValueError("Number of features in X and"
                             " fitted array does not match.")
        n_samples = X.shape[0]
        n_indexed = self._fit_X.shape[0]
        for i in range(self.n_estimators):
            bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # gets the position to be added in the tree.
            positions = self.trees_[i].searchsorted(bin_X)
            # adds the hashed value into the tree.
            self.trees_[i] = np.insert(self.trees_[i],
                                       positions, bin_X)
            # add the entry into the original_indices_.
            # New rows get indices n_indexed .. n_indexed + n_samples - 1.
            self.original_indices_[i] = np.insert(self.original_indices_[i],
                                                  positions,
                                                  np.arange(n_indexed,
                                                            n_indexed +
                                                            n_samples))
        # adds the entry into the input_array.
        if sparse.issparse(X) or sparse.issparse(self._fit_X):
            self._fit_X = sparse.vstack((self._fit_X, X))
        else:
            self._fit_X = np.row_stack((self._fit_X, X))
        return self
| bsd-3-clause |
andaag/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Return a dense version of *a*, densifying sparse matrices in place-of."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_label_binarizer():
    """LabelBinarizer round-trips one-, two- and multi-class string inputs."""
    lb = LabelBinarizer()
    # one-class case defaults to negative label
    inp = ["pos", "pos", "pos", "pos"]
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # two-class case
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)
    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)
    # multi-class case
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    """transform() emits all-zero rows for labels absent at fit time."""
    lb = LabelBinarizer()
    expected = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]])
    got = lb.fit_transform(['b', 'd', 'e'])
    assert_array_equal(expected, got)
    # 'a', 'c' and 'f' were never seen; their rows stay all-zero.
    expected = np.array([[0, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1],
                         [0, 0, 0]])
    got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
    """Custom neg_label/pos_label values are honoured and invertible."""
    lb = LabelBinarizer(neg_label=-2, pos_label=0)
    # two-class case with pos_label=0
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 0, 0, -2]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    lb = LabelBinarizer(neg_label=-2, pos_label=2)
    # multi-class case
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
    """Invalid inputs and parameter combinations raise ValueError."""
    # Check that invalid arguments yield ValueError
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)
    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)
    # Sequence of seq type should raise ValueError
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
    # Fail on the number of classes
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)
    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)
    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
def test_label_encoder():
    """LabelEncoder maps sorted unique labels to 0..n-1 and back."""
    # Test LabelEncoder's transform and inverse_transform methods
    le = LabelEncoder()
    le.fit([1, 1, 4, 5, -1, 0])
    assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    assert_raises(ValueError, le.transform, [0, 6])


def test_label_encoder_fit_transform():
    """fit_transform agrees with fit followed by transform."""
    # Test fit_transform
    le = LabelEncoder()
    ret = le.fit_transform([1, 1, 4, 5, -1, 0])
    assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
    le = LabelEncoder()
    ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
    assert_array_equal(ret, [1, 1, 2, 0])


def test_label_encoder_errors():
    """Empty input and unseen labels raise ValueError."""
    # Check that invalid arguments yield ValueError
    le = LabelEncoder()
    assert_raises(ValueError, le.transform, [])
    assert_raises(ValueError, le.inverse_transform, [])
    # Fail on unseen labels
    le = LabelEncoder()
    le.fit([1, 2, 3, 1, -1])
    assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
    """Sparse and dense MultiLabelBinarizer outputs agree for all inputs."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for sparse_output in [True, False]:
        for inp in inputs:
            # With fit_tranform
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit_transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
            # With fit
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit(inp()).transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
    # Non-binary sparse input cannot be inverted.
    assert_raises(ValueError, mlb.inverse_transform,
                  csr_matrix(np.array([[0, 1, 1],
                                       [2, 0, 0],
                                       [1, 1, 0]])))
def test_multilabel_binarizer():
    """MultiLabelBinarizer handles lists, sets and one-shot iterators."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for inp in inputs:
        # With fit_tranform
        mlb = MultiLabelBinarizer()
        got = mlb.fit_transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
        # With fit
        mlb = MultiLabelBinarizer()
        got = mlb.fit(inp()).transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)


def test_multilabel_binarizer_empty_sample():
    """A sample with no labels yields an all-zero indicator row."""
    mlb = MultiLabelBinarizer()
    y = [[1, 2], [1], []]
    Y = np.array([[1, 1],
                  [1, 0],
                  [0, 0]])
    assert_array_equal(mlb.fit_transform(y), Y)


def test_multilabel_binarizer_unknown_class():
    """Labels outside the fitted/declared classes raise KeyError."""
    mlb = MultiLabelBinarizer()
    y = [[1, 2]]
    assert_raises(KeyError, mlb.fit(y).transform, [[0]])
    mlb = MultiLabelBinarizer(classes=[1, 2])
    assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
    """An explicit `classes` argument fixes the column order."""
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # fit().transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure works with extra class
    mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp),
                       np.hstack(([[0], [0], [0]], indicator_mat)))
    assert_array_equal(mlb.classes_, [4, 1, 3, 2])
    # ensure fit is no-op as iterable is not consumed
    inp = iter(inp)
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)


def test_multilabel_binarizer_same_length_sequence():
    """Equal-length label sequences are still treated as label sets."""
    # Ensure sequences of the same length are not interpreted as a 2-d array
    inp = [[1], [0], [2]]
    indicator_mat = np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # fit().transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
    """Duplicate labels inside one sample collapse to a single indicator."""
    sample = [(1, 1, 1, 0)]
    expected = np.array([[1, 1]])
    assert_array_equal(MultiLabelBinarizer().fit_transform(sample), expected)
def test_multilabel_binarizer_inverse_validation():
    """inverse_transform must reject non-binary values and wrong widths."""
    binarizer = MultiLabelBinarizer()
    binarizer.fit_transform([(1, 1, 1, 0)])
    # A value other than 0/1 is not a valid indicator matrix.
    assert_raises(ValueError, binarizer.inverse_transform, np.array([[1, 3]]))
    # Strictly binary rows of the right width are accepted.
    for row in ([[0, 0]], [[1, 1]], [[1, 0]]):
        binarizer.inverse_transform(np.array(row))
    # Column count must match the number of fitted classes (two here).
    assert_raises(ValueError, binarizer.inverse_transform, np.array([[1]]))
    assert_raises(ValueError, binarizer.inverse_transform,
                  np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    # Column order of the indicator matrix follows ``classes`` exactly,
    # not the sorted order of the labels.
    out = label_binarize([1, 6], classes=[1, 2, 4, 6])
    expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
    assert_array_equal(out, expected)

    # Modified class order
    out = label_binarize([1, 6], classes=[1, 6, 4, 2])
    expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    assert_array_equal(out, expected)

    # Fully permuted class order with every class present in y.
    out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
    expected = np.array([[0, 0, 1, 0],
                         [0, 0, 0, 1],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]])
    assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
    # Shared checker used by the yield-style tests below: verifies
    # label_binarize and LabelBinarizer against ``expected`` for both dense
    # and sparse output, including the inverse transform round trip.
    for sparse_output in [True, False]:
        if ((pos_label == 0 or neg_label != 0) and sparse_output):
            # Sparse output cannot encode a nonzero neg_label (or a zero
            # pos_label); this combination must raise.
            assert_raises(ValueError, label_binarize, y, classes,
                          neg_label=neg_label, pos_label=pos_label,
                          sparse_output=sparse_output)
            continue

        # check label_binarize
        binarized = label_binarize(y, classes, neg_label=neg_label,
                                   pos_label=pos_label,
                                   sparse_output=sparse_output)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)

        # check inverse
        y_type = type_of_target(y)
        if y_type == "multiclass":
            inversed = _inverse_binarize_multiclass(binarized, classes=classes)

        else:
            # Threshold at the midpoint between the two label values.
            inversed = _inverse_binarize_thresholding(binarized,
                                                      output_type=y_type,
                                                      classes=classes,
                                                      threshold=((neg_label +
                                                                  pos_label) /
                                                                 2.))

        assert_array_equal(toarray(inversed), toarray(y))

        # Check label binarizer
        lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
                            sparse_output=sparse_output)
        binarized = lb.fit_transform(y)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        inverse_output = lb.inverse_transform(binarized)
        assert_array_equal(toarray(inverse_output), toarray(y))
        # Sparseness of the inverse must mirror the sparseness of y.
        assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
    # Nose-style generator test: each ``yield`` emits (check_fn, *args).
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 2
    neg_label = -1
    # Binary targets collapse to a single indicator column.
    expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))

    yield check_binarized_results, y, classes, pos_label, neg_label, expected

    # Binary case where sparse_output = True will not result in a ValueError
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 3
    neg_label = 0
    expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))

    yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
    # Nose-style generator test for the multiclass path.
    y = [0, 1, 2]
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    # One-hot with pos_label scaling.
    expected = 2 * np.eye(3)

    yield check_binarized_results, y, classes, pos_label, neg_label, expected

    # A nonzero neg_label cannot be represented by a sparse matrix.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
    # Nose-style generator test: dense indicator input plus every sparse
    # matrix format must produce the same scaled indicator output.
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]

    for y in [y_ind] + y_sparse:
        yield (check_binarized_results, y, classes, pos_label, neg_label,
               expected)

    # A nonzero neg_label cannot be represented by a sparse matrix.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
    """label_binarize rejects conflicting pos_label/neg_label settings."""
    # pos_label == 0 combined with neg_label != 0 is invalid.
    bad_kwargs = dict(classes=[0, 2], pos_label=0, neg_label=1)
    assert_raises(ValueError, label_binarize, [0, 2], **bad_kwargs)
def test_inverse_binarize_multiclass():
    """Row-wise argmax decoding; ties resolve to the first maximum."""
    indicator = csr_matrix([[0, 1, 0],
                            [-1, 0, -1],
                            [0, 0, 0]])
    decoded = _inverse_binarize_multiclass(indicator, np.arange(3))
    assert_array_equal(decoded, np.array([1, 1, 0]))
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | 33 | 6257 | '''using multivariate dependence and divergence measures
The standard correlation coefficient measures only linear dependence between
random variables.
kendall's tau measures any monotonic relationship also non-linear.
mutual information measures any kind of dependence, but does not distinguish
between positive and negative relationship
mutualinfo_kde and mutualinfo_binned follow Khan et al. 2007
Shiraj Khan, Sharba Bandyopadhyay, Auroop R. Ganguly, Sunil Saigal,
David J. Erickson, III, Vladimir Protopopescu, and George Ostrouchov,
Relative performance of mutual information estimation methods for
quantifying the dependence among short and noisy data,
Phys. Rev. E 76, 026209 (2007)
http://pre.aps.org/abstract/PRE/v76/i2/e026209
'''
import numpy as np
from scipy import stats
from scipy.stats import gaussian_kde
import statsmodels.sandbox.infotheo as infotheo
def mutualinfo_kde(y, x, normed=True):
    """Mutual information of two 1-d samples, estimated via Gaussian KDE.

    The marginal and joint densities are estimated with ``gaussian_kde`` and
    the pointwise log-density ratio is averaged over the observations.  With
    ``normed=True`` the result is mapped to [0, 1] via
    ``sqrt(1 - exp(-2 * mi))``.
    """
    n_obs = len(x)
    if len(y) != n_obs:
        raise ValueError('both data arrays need to have the same size')
    x_arr = np.asarray(x, float)
    y_arr = np.asarray(y, float)
    joint = np.vstack((y_arr, x_arr))

    # Density estimates evaluated at the observed points themselves.
    dens_x = gaussian_kde(x_arr)(x_arr)
    dens_y = gaussian_kde(y_arr)(y_arr)
    dens_joint = gaussian_kde(joint)(joint)

    pointwise = np.log(dens_joint) - np.log(dens_x) - np.log(dens_y)
    mi = pointwise.sum() / n_obs
    if not normed:
        return mi
    return np.sqrt(1. - np.exp(-2 * mi))
def mutualinfo_kde_2sample(y, x, normed=True):
    """KDE estimate of the KL-divergence-style information between two
    samples: mean log-ratio of the density fitted on ``x`` to the density
    fitted on ``y``, both evaluated at the points of ``x``.
    """
    n_obs = len(x)
    x_arr = np.asarray(x, float)
    y_arr = np.asarray(y, float)

    dens_x = gaussian_kde(x_arr.T)(x_arr.T)
    dens_y = gaussian_kde(y_arr.T)(x_arr.T)
    ratio = np.log(dens_x) - np.log(dens_y)
    # Sanity check: both densities were evaluated at the n_obs points of x.
    if len(ratio) != n_obs:
        raise ValueError("Wrong number of observations")
    mi = ratio.mean()
    if not normed:
        return mi
    return np.sqrt(1. - np.exp(-2 * mi))
def mutualinfo_binned(y, x, bins, normed=True):
    """Mutual information of two random variables estimated from histograms.

    Parameters
    ----------
    y, x : array_like, 1-D
        Data arrays of equal length.
    bins : 'auto', scalar, or 2-tuple
        'auto' chooses quantile-based bin edges so that approximately five
        observations are expected per bin under independence (roughly
        following Khan et al. 2007); a scalar is the bin count for both
        margins; a 2-tuple gives explicit ``(binsy, binsx)`` edges.
    normed : bool
        If True, return ``sqrt(1 - exp(-2 * mi))`` together with the
        estimated distributions and per-cell contributions; otherwise
        return only the raw mutual information.

    Returns
    -------
    ``mi_normed, (pyx, py, px, binsy, binsx), mi_obs`` if ``normed`` is
    True, else the float ``mi``.
    """
    nobs = len(x)
    if not len(y) == nobs:
        raise ValueError('both data arrays need to have the same size')
    x = np.asarray(x, float)
    y = np.asarray(y, float)

    if bins == 'auto':
        ys = np.sort(y)
        xs = np.sort(x)
        # ~5 expected observations per 2-d cell under independence.
        qbin_sqr = np.sqrt(5. / nobs)
        # BUG FIX: np.linspace requires an integer ``num``; the float that
        # was passed here raises TypeError on modern numpy.  Truncation
        # reproduces the historical implicit float->int conversion.
        quantiles = np.linspace(0, 1, int(1. / qbin_sqr))
        quantile_index = ((nobs - 1) * quantiles).astype(int)
        # Move the edges slightly so they don't coincide with an
        # observation.  BUG FIX: this was ``1e-6 + np.ones(...)``, which
        # displaced every edge by a full 1.0 and mis-binned the data; the
        # intended perturbation is +/- 1e-6 (first edge just below the
        # minimum, all others just above their quantile).
        shift = 1e-6 * np.ones(quantiles.shape)
        shift[0] -= 2 * 1e-6
        binsy = ys[quantile_index] + shift
        binsx = xs[quantile_index] + shift

    elif np.size(bins) == 1:
        binsy = bins
        binsx = bins
    elif (len(bins) == 2):
        binsy, binsx = bins
    else:
        # Previously fell through and died with a NameError.
        raise ValueError("bins must be 'auto', a scalar, or a 2-tuple")

    fx, binsx = np.histogram(x, bins=binsx)
    fy, binsy = np.histogram(y, bins=binsy)
    fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))

    pyx = fyx * 1. / nobs
    px = fx * 1. / nobs
    py = fy * 1. / nobs

    # The epsilon keeps log finite where the joint histogram is empty; the
    # pyx factor is 0 there, so those cells contribute nothing.
    # NOTE(review): empty *marginal* bins would still yield NaN cells;
    # quantile-based 'auto' edges cannot produce them, explicit bins can.
    mi_obs = pyx * (np.log(pyx + 1e-10) - np.log(py)[:, None] - np.log(px))
    mi = mi_obs.sum()
    if normed:
        mi_normed = np.sqrt(1. - np.exp(-2 * mi))
        return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
    else:
        return mi
if __name__ == '__main__':
    # Demo: compare linear dependence measures with the MI estimators above
    # on a linear vs. quadratic relationship with additive Gaussian noise.
    # Fresh random data on every run, so printed values are not reproducible.
    import statsmodels.api as sm

    funtype = ['linear', 'quadratic'][1]
    nobs = 200
    sig = 2#5.
    #x = np.linspace(-3, 3, nobs) + np.random.randn(nobs)
    x = np.sort(3*np.random.randn(nobs))
    exog = sm.add_constant(x, prepend=True)
    #y = 0 + np.log(1+x**2) + sig * np.random.randn(nobs)
    if funtype == 'quadratic':
        y = 0 + x**2 + sig * np.random.randn(nobs)
    if funtype == 'linear':
        y = 0 + x + sig * np.random.randn(nobs)

    # Classical (mostly linear / monotonic) dependence measures.
    print('correlation')
    print(np.corrcoef(y, x)[0, 1])
    print('pearsonr', stats.pearsonr(y, x))
    print('spearmanr', stats.spearmanr(y, x))
    print('kendalltau', stats.kendalltau(y, x))

    # Reference MI from a fixed 5x5 histogram via the infotheo helper.
    pxy, binsx, binsy = np.histogram2d(x, y, bins=5)
    px, binsx_ = np.histogram(x, bins=binsx)
    py, binsy_ = np.histogram(y, bins=binsy)
    print('mutualinfo', infotheo.mutualinfo(px*1./nobs, py*1./nobs,
                                            1e-15+pxy*1./nobs, logbase=np.e))

    print('mutualinfo_kde normed', mutualinfo_kde(y, x))
    print('mutualinfo_kde       ', mutualinfo_kde(y, x, normed=False))

    # Binned estimator: fixed bin count, then automatic quantile bins.
    mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
        mutualinfo_binned(y, x, 5, normed=True)
    print('mutualinfo_binned normed', mi_normed)
    print('mutualinfo_binned       ', mi_obs.sum())

    mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
        mutualinfo_binned(y, x, 'auto', normed=True)
    print('auto')
    print('mutualinfo_binned normed', mi_normed)
    print('mutualinfo_binned       ', mi_obs.sum())

    # Explicit quantile-based bin edges passed as a (binsy, binsx) tuple.
    ys = np.sort(y)
    xs = np.sort(x)
    by = ys[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
    bx = xs[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
    mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
        mutualinfo_binned(y, x, (by, bx), normed=True)
    print('quantiles')
    print('mutualinfo_binned normed', mi_normed)
    print('mutualinfo_binned       ', mi_obs.sum())

    doplot = 1#False
    if doplot:
        import matplotlib.pyplot as plt
        plt.plot(x, y, 'o')
        olsres = sm.OLS(y, exog).fit()
        plt.plot(x, olsres.fittedvalues)
| bsd-3-clause |
heplesser/nest-simulator | pynest/examples/structural_plasticity.py | 8 | 13940 | # -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in [1]_.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
References
~~~~~~~~~~
.. [1] Butz, M., and van Ooyen, A. (2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions. PLoS Comput. Biol. 9 (10), e1003259.
"""
####################################################################################
# First, we have import all necessary modules.
import nest
import numpy
import matplotlib.pyplot as plt
import sys
####################################################################################
# We define general simulation parameters
# NOTE(review): the class name is missing letters ('Structural'); renaming
# would break external references, so it is kept as-is.
class StructralPlasticityExample:
    """Two-population (80% excitatory / 20% inhibitory) network whose
    connectivity is created and pruned by NEST's structural-plasticity
    machinery, following Butz & van Ooyen (2013); see module docstring."""

    def __init__(self):
        """Define all simulation, growth-curve and neuron parameters."""
        # simulated time (ms)
        self.t_sim = 200000.0
        # simulation step (ms).
        self.dt = 0.1
        self.number_excitatory_neurons = 800
        self.number_inhibitory_neurons = 200

        # Structural_plasticity properties
        self.update_interval = 10000.0
        self.record_interval = 1000.0
        # rate of background Poisson input
        self.bg_rate = 10000.0
        # NOTE(review): this attribute is never used below; create_nodes()
        # hard-codes 'iaf_psc_alpha' instead — confirm which is intended.
        self.neuron_model = 'iaf_psc_exp'

        ####################################################################################
        # In this implementation of structural plasticity, neurons grow
        # connection points called synaptic elements. Synapses can be created
        # between compatible synaptic elements. The growth of these elements is
        # guided by homeostatic rules, defined as growth curves.
        # Here we specify the growth curves for synaptic elements of excitatory
        # and inhibitory neurons.

        # Excitatory synaptic elements of excitatory neurons
        self.growth_curve_e_e = {
            'growth_curve': "gaussian",
            'growth_rate': 0.0001,  # (elements/ms)
            'continuous': False,
            'eta': 0.0,  # Ca2+
            'eps': 0.05,  # Ca2+
        }

        # Inhibitory synaptic elements of excitatory neurons
        self.growth_curve_e_i = {
            'growth_curve': "gaussian",
            'growth_rate': 0.0001,  # (elements/ms)
            'continuous': False,
            'eta': 0.0,  # Ca2+
            'eps': self.growth_curve_e_e['eps'],  # Ca2+
        }

        # Excitatory synaptic elements of inhibitory neurons
        self.growth_curve_i_e = {
            'growth_curve': "gaussian",
            'growth_rate': 0.0004,  # (elements/ms)
            'continuous': False,
            'eta': 0.0,  # Ca2+
            'eps': 0.2,  # Ca2+
        }

        # Inhibitory synaptic elements of inhibitory neurons
        self.growth_curve_i_i = {
            'growth_curve': "gaussian",
            'growth_rate': 0.0001,  # (elements/ms)
            'continuous': False,
            'eta': 0.0,  # Ca2+
            'eps': self.growth_curve_i_e['eps']  # Ca2+
        }

        # Now we specify the neuron model.
        self.model_params = {'tau_m': 10.0,  # membrane time constant (ms)
                             # excitatory synaptic time constant (ms)
                             'tau_syn_ex': 0.5,
                             # inhibitory synaptic time constant (ms)
                             'tau_syn_in': 0.5,
                             't_ref': 2.0,  # absolute refractory period (ms)
                             'E_L': -65.0,  # resting membrane potential (mV)
                             'V_th': -50.0,  # spike threshold (mV)
                             'C_m': 250.0,  # membrane capacitance (pF)
                             'V_reset': -65.0  # reset potential (mV)
                             }

        # Populations and per-interval recordings, filled in later.
        self.nodes_e = None
        self.nodes_i = None
        self.mean_ca_e = []
        self.mean_ca_i = []
        self.total_connections_e = []
        self.total_connections_i = []

        ####################################################################################
        # We initialize variables for the postsynaptic currents of the
        # excitatory, inhibitory, and external synapses. These values were
        # calculated from a PSP amplitude of 1 for excitatory synapses,
        # -1 for inhibitory synapses and 0.11 for external synapses.
        self.psc_e = 585.0
        self.psc_i = -585.0
        self.psc_ext = 6.2

    def prepare_simulation(self):
        """Reset the NEST kernel and configure structural plasticity."""
        nest.ResetKernel()
        nest.set_verbosity('M_ERROR')

        ####################################################################################
        # We set global kernel parameters. Here we define the resolution
        # for the simulation, which is also the time resolution for the update
        # of the synaptic elements.
        nest.SetKernelStatus(
            {
                'resolution': self.dt
            }
        )

        ####################################################################################
        # Set Structural Plasticity synaptic update interval which is how often
        # the connectivity will be updated inside the network. It is important
        # to notice that synaptic elements and connections change on different
        # time scales.
        nest.SetKernelStatus({
            'structural_plasticity_update_interval': self.update_interval,
        })

        ####################################################################################
        # Now we define Structural Plasticity synapses. In this example we create
        # two synapse models, one for excitatory and one for inhibitory synapses.
        # Then we define that excitatory synapses can only be created between a
        # pre-synaptic element called `Axon_ex` and a postsynaptic element
        # called `Den_ex`. In a similar manner, synaptic elements for inhibitory
        # synapses are defined.
        nest.CopyModel('static_synapse', 'synapse_ex')
        nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
        nest.CopyModel('static_synapse', 'synapse_in')
        nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
        nest.SetKernelStatus({
            'structural_plasticity_synapses': {
                'synapse_ex': {
                    'synapse_model': 'synapse_ex',
                    'post_synaptic_element': 'Den_ex',
                    'pre_synaptic_element': 'Axon_ex',
                },
                'synapse_in': {
                    'synapse_model': 'synapse_in',
                    'post_synaptic_element': 'Den_in',
                    'pre_synaptic_element': 'Axon_in',
                },
            }
        })

    def create_nodes(self):
        """
        Assign growth curves to synaptic elements
        """
        synaptic_elements = {
            'Den_ex': self.growth_curve_e_e,
            'Den_in': self.growth_curve_e_i,
            'Axon_ex': self.growth_curve_e_e,
        }

        synaptic_elements_i = {
            'Den_ex': self.growth_curve_i_e,
            'Den_in': self.growth_curve_i_i,
            'Axon_in': self.growth_curve_i_i,
        }

        ####################################################################################
        # Then it is time to create a population with 80% of the total network
        # size excitatory neurons and another one with 20% of the total network
        # size of inhibitory neurons.
        # NOTE(review): 'iaf_psc_alpha' is created here although
        # self.neuron_model is 'iaf_psc_exp' — confirm which model is meant.
        self.nodes_e = nest.Create('iaf_psc_alpha',
                                   self.number_excitatory_neurons,
                                   {'synaptic_elements': synaptic_elements})

        self.nodes_i = nest.Create('iaf_psc_alpha',
                                   self.number_inhibitory_neurons,
                                   {'synaptic_elements': synaptic_elements_i})
        self.nodes_e.synaptic_elements = synaptic_elements
        self.nodes_i.synaptic_elements = synaptic_elements_i

    def connect_external_input(self):
        """
        We create and connect the Poisson generator for external input
        """
        noise = nest.Create('poisson_generator')
        noise.rate = self.bg_rate
        nest.Connect(noise, self.nodes_e, 'all_to_all',
                     {'weight': self.psc_ext, 'delay': 1.0})
        nest.Connect(noise, self.nodes_i, 'all_to_all',
                     {'weight': self.psc_ext, 'delay': 1.0})

    ####################################################################################
    # In order to save the amount of average calcium concentration in each
    # population through time we create the function ``record_ca``. Here we use
    # the value of `Ca` for every neuron in the network and then
    # store the average.
    def record_ca(self):
        """Append the current population-mean Ca concentration of both
        populations to ``mean_ca_e`` / ``mean_ca_i``."""
        ca_e = self.nodes_e.Ca,  # Calcium concentration
        # NOTE(review): the trailing comma above wraps the values in a
        # 1-tuple; numpy.mean flattens it, so the mean is unchanged.
        self.mean_ca_e.append(numpy.mean(ca_e))

        ca_i = self.nodes_i.Ca,  # Calcium concentration
        self.mean_ca_i.append(numpy.mean(ca_i))

    ####################################################################################
    # In order to save the state of the connectivity in the network through time
    # we create the function ``record_connectivity``. Here we retrieve the number
    # of connected pre-synaptic elements of each neuron. The total amount of
    # excitatory connections is equal to the total amount of connected excitatory
    # pre-synaptic elements. The same applies for inhibitory connections.
    def record_connectivity(self):
        """Append the total number of connected excitatory and inhibitory
        axonal (pre-synaptic) elements to the connection histories."""
        syn_elems_e = self.nodes_e.synaptic_elements
        syn_elems_i = self.nodes_i.synaptic_elements
        self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
                                            for neuron in syn_elems_e))
        self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
                                            for neuron in syn_elems_i))

    ####################################################################################
    # We define a function to plot the recorded values
    # at the end of the simulation.
    def plot_data(self):
        """Plot Ca concentration (left axis) and connection counts (right
        axis) over time and save the figure as an EPS file."""
        fig, ax1 = plt.subplots()
        # Horizontal lines mark the homeostatic set points ('eps').
        ax1.axhline(self.growth_curve_e_e['eps'],
                    linewidth=4.0, color='#9999FF')
        ax1.plot(self.mean_ca_e, 'b',
                 label='Ca Concentration Excitatory Neurons', linewidth=2.0)
        ax1.axhline(self.growth_curve_i_e['eps'],
                    linewidth=4.0, color='#FF9999')
        ax1.plot(self.mean_ca_i, 'r',
                 label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
        ax1.set_ylim([0, 0.275])
        ax1.set_xlabel("Time in [s]")
        ax1.set_ylabel("Ca concentration")
        ax2 = ax1.twinx()
        ax2.plot(self.total_connections_e, 'm',
                 label='Excitatory connections', linewidth=2.0, linestyle='--')
        ax2.plot(self.total_connections_i, 'k',
                 label='Inhibitory connections', linewidth=2.0, linestyle='--')
        ax2.set_ylim([0, 2500])
        ax2.set_ylabel("Connections")
        ax1.legend(loc=1)
        ax2.legend(loc=4)
        plt.savefig('StructuralPlasticityExample.eps', format='eps')

    ####################################################################################
    # It is time to specify how we want to perform the simulation. In this
    # function we first enable structural plasticity in the network and then we
    # simulate in steps. On each step we record the calcium concentration and the
    # connectivity. At the end of the simulation, the plot of connections and
    # calcium concentration through time is generated.
    def simulate(self):
        """Run the simulation in record_interval chunks, recording Ca and
        connectivity after each chunk."""
        if nest.NumProcesses() > 1:
            sys.exit("For simplicity, this example only works " +
                     "for a single process.")
        nest.EnableStructuralPlasticity()
        print("Starting simulation")
        sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
        for i, step in enumerate(sim_steps):
            nest.Simulate(self.record_interval)
            self.record_ca()
            self.record_connectivity()
            if i % 20 == 0:
                # 200 recording steps in total, so i/2 is percent complete.
                print("Progress: " + str(i / 2) + "%")
        print("Simulation finished successfully")
####################################################################################
# Finally we take all the functions that we have defined and create the sequence
# for our example. We prepare the simulation, create the nodes for the network,
# connect the external input and then simulate. Please note that as we are
# simulating 200 biological seconds in this example, it will take a few minutes
# to complete.
if __name__ == '__main__':
    # Run the full example: configure the kernel, build the network,
    # attach background input, simulate (~minutes of wall time), and plot.
    example = StructralPlasticityExample()
    # Prepare simulation
    example.prepare_simulation()
    example.create_nodes()
    example.connect_external_input()
    # Start simulation
    example.simulate()
    example.plot_data()
| gpl-2.0 |
neuroidss/neon | neon/diagnostics/visualize_rnn.py | 9 | 6325 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Visualization for recurrent neural networks
"""
import numpy as np
from neon.util.compat import range
class VisualizeRNN(object):
    """
    Visualize weight matrices, activations and errors of a recurrent
    network during training (uses the non-interactive 'Agg' backend).
    """
    def __init__(self):
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt  # noqa
        matplotlib.rcParams['pdf.fonttype'] = 42  # ensure true type font
        self.plt = matplotlib.pyplot
        self.plt.interactive(1)

    def plot_weights(self, weights_in, weights_rec, weights_out):
        """
        Visualize the three weight matrices after every epoch. Serves to
        check that weights are structured, not exploding, and get updated.
        """
        self.plt.figure(2)
        self.plt.clf()
        # Input weights are transposed so all panels share orientation.
        self.plt.subplot(1, 3, 1)
        self.plt.imshow(weights_in.T, vmin=-1, vmax=1, interpolation='nearest')
        self.plt.title('input.T')
        self.plt.subplot(1, 3, 2)
        self.plt.imshow(weights_rec, vmin=-1, vmax=1, interpolation='nearest')
        self.plt.title('recurrent')
        self.plt.subplot(1, 3, 3)
        self.plt.imshow(weights_out, vmin=-1, vmax=1, interpolation='nearest')
        self.plt.title('output')
        self.plt.colorbar()
        self.plt.draw()
        self.plt.show()

    def plot_lstm_wts(self, lstm_layer, scale=1, fig=4):
        """
        Visualize the LSTM gate weights after every epoch: top row the
        input weights Wx, bottom row the recurrent weights Wh (with the
        bias column appended twice for visibility).
        """
        self.plt.figure(fig)
        self.plt.clf()
        pltidx = 1
        for lbl, wts in zip(lstm_layer.param_names, lstm_layer.params[:4]):
            self.plt.subplot(2, 4, pltidx)
            self.plt.imshow(wts.asnumpyarray().T, vmin=-scale, vmax=scale,
                            interpolation='nearest')
            self.plt.title(lbl + ' Wx.T')
            pltidx += 1
        for lbl, wts, bs in zip(lstm_layer.param_names,
                                lstm_layer.params[4:8],
                                lstm_layer.params[8:12]):
            self.plt.subplot(2, 4, pltidx)
            self.plt.imshow(np.hstack((wts.asnumpyarray(),
                                       bs.asnumpyarray(),
                                       bs.asnumpyarray())).T,
                            vmin=-scale, vmax=scale, interpolation='nearest')
            self.plt.title(lbl + ' Wh.T')
            pltidx += 1
        self.plt.draw()
        self.plt.show()

    def plot_lstm_acts(self, lstm_layer, scale=1, fig=4):
        """Visualize gate/cell activations at selected unrolling steps."""
        # Attribute names on the layer and the unrolling step to show for
        # each (e.g. c_t is shown at steps 0 and 1).
        acts_lbl = ['i_t', 'f_t', 'o_t', 'g_t', 'net_i', 'c_t', 'c_t', 'c_phi']
        acts_stp = [0, 0, 0, 1, 0, 0, 1, 1]
        self.plt.figure(fig)
        self.plt.clf()
        for idx, lbl in enumerate(acts_lbl):
            act_tsr = getattr(lstm_layer, lbl)[acts_stp[idx]]
            self.plt.subplot(2, 4, idx+1)
            self.plt.imshow(act_tsr.asnumpyarray().T,
                            vmin=-scale, vmax=scale, interpolation='nearest')
            self.plt.title(lbl + '[' + str(acts_stp[idx]) + '].T')
        self.plt.draw()
        self.plt.show()

    def plot_error(self, suberror_list, error_list):
        """Overlay per-batch errors (rescaled to epoch axis) and per-epoch
        errors in figure 1."""
        self.plt.figure(1)
        self.plt.clf()
        # NOTE(review): np.float is removed in numpy >= 1.24; float() would
        # be the drop-in replacement.
        self.plt.plot(np.arange(len(suberror_list)) /
                      np.float(len(suberror_list)) *
                      len(error_list), suberror_list)
        self.plt.plot(error_list, linewidth=2)
        self.plt.ylim((min(suberror_list), max(error_list)))
        self.plt.draw()
        self.plt.show()

    def plot_activations(self, pre1, out1, pre2, out2, targets):
        """
        Loop over tau unrolling steps, at each time step show the pre-acts
        and outputs of the recurrent layer and output layer. Note that the
        pre-acts are actually the g', so if the activation is linear it will
        be one.
        """
        self.plt.figure(3)
        self.plt.clf()
        for i in range(len(pre1)):  # loop over unrolling
            self.plt.subplot(len(pre1), 5, 5 * i + 1)
            self.plt.imshow(pre1[i].asnumpyarray(), vmin=-1, vmax=1,
                            interpolation='nearest')
            if i == 0:
                self.plt.title('pre1 or g\'1')
            self.plt.subplot(len(pre1), 5, 5 * i + 2)
            self.plt.imshow(out1[i].asnumpyarray(), vmin=-1, vmax=1,
                            interpolation='nearest')
            if i == 0:
                self.plt.title('out1')
            self.plt.subplot(len(pre1), 5, 5 * i + 3)
            self.plt.imshow(pre2[i].asnumpyarray(), vmin=-1, vmax=1,
                            interpolation='nearest')
            if i == 0:
                self.plt.title('pre2 or g\'2')
            self.plt.subplot(len(pre1), 5, 5 * i + 4)
            self.plt.imshow(out2[i].asnumpyarray(), vmin=-1, vmax=1,
                            interpolation='nearest')
            if i == 0:
                self.plt.title('out2')
            self.plt.subplot(len(pre1), 5, 5 * i + 5)
            self.plt.imshow(targets[i].asnumpyarray(),
                            vmin=-1, vmax=1, interpolation='nearest')
            if i == 0:
                self.plt.title('target')
        self.plt.draw()
        self.plt.show()

    def print_text(self, inputs, outputs):
        """
        Moved this here so it's legal to use numpy.
        Decode one-hot character tensors (argmax over axis 0) and print
        the input and predicted character sequences.
        """
        print("Prediction inputs")
        print(np.argmax(inputs, 0).asnumpyarray().astype(np.int8).view('c'))
        print("Prediction outputs")
        print(np.argmax(outputs, 0).asnumpyarray().astype(np.int8).view('c'))
| apache-2.0 |
SamHames/scikit-image | doc/ext/plot2rst.py | 1 | 18498 | """
Example generation from python files.
Generate the rst files for the examples by iterating over the python
example files. Files that generate images should start with 'plot'.
To generate your own examples, add this extension to the list of
``extensions`` in your Sphinx configuration file. In addition, make sure the
example directory(ies) in `plot2rst_paths` (see below) points to a directory
with examples named `plot_*.py` and include an `index.rst` file.
This code was adapted from scikit-image, which took it from scikit-learn.
Options
-------
The ``plot2rst`` extension accepts the following options:
plot2rst_paths : length-2 tuple, or list of tuples
Tuple or list of tuples of paths to (python plot, generated rst) files,
i.e. (source, destination). Note that both paths are relative to Sphinx
'source' directory. Defaults to ('../examples', 'auto_examples')
plot2rst_rcparams : dict
Matplotlib configuration parameters. See
http://matplotlib.sourceforge.net/users/customizing.html for details.
plot2rst_default_thumb : str
Path (relative to doc root) of default thumbnail image.
plot2rst_thumb_shape : float
Shape of thumbnail in pixels. The image is resized to fit within this shape
and the excess is filled with white pixels. This fixed size ensures that
that gallery images are displayed in a grid.
plot2rst_plot_tag : str
When this tag is found in the example file, the current plot is saved and
tag is replaced with plot path. Defaults to 'PLOT2RST.current_figure'.
Suggested CSS definitions
-------------------------
div.body h2 {
border-bottom: 1px solid #BBB;
clear: left;
}
/*---- example gallery ----*/
.gallery.figure {
float: left;
margin: 1em;
}
.gallery.figure img{
display: block;
margin-left: auto;
margin-right: auto;
width: 200px;
}
.gallery.figure .caption {
width: 200px;
text-align: center !important;
}
"""
import os
import shutil
import token
import tokenize
import traceback
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
from skimage import transform
from skimage.util.dtype import dtype_range
from notebook import Notebook
from docutils.core import publish_parts
LITERALINCLUDE = """
.. literalinclude:: {src_name}
:lines: {code_start}-
"""
CODE_LINK = """
**Python source code:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
NOTEBOOK_LINK = """
**IPython Notebook:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
TOCTREE_TEMPLATE = """
.. toctree::
:hidden:
%s
"""
IMAGE_TEMPLATE = """
.. image:: images/%s
:align: center
"""
GALLERY_IMAGE_TEMPLATE = """
.. figure:: %(thumb)s
:figclass: gallery
:target: ./%(source)s.html
:ref:`example_%(link_name)s`
"""
class Path(str):
    """String subclass with convenience helpers for path manipulation.

    Instances behave like plain strings, but expose filesystem predicates
    and join/split/concatenation helpers that return ``Path`` objects so
    that calls can be chained.
    """

    def __init__(self, path):
        # The string value is already set by ``str.__new__``; forwarding
        # ``path`` here (as the original did) raised a TypeError on
        # Python 3 because ``object.__init__`` takes no extra arguments.
        super(Path, self).__init__()

    @property
    def isdir(self):
        """Return True if the path is an existing directory."""
        return os.path.isdir(self)

    @property
    def exists(self):
        """Return True if path exists"""
        return os.path.exists(self)

    def pjoin(self, *args):
        """Join paths. `p` prefix prevents confusion with string method."""
        return self.__class__(os.path.join(self, *args))

    def psplit(self):
        """Split paths. `p` prefix prevents confusion with string method."""
        return [self.__class__(p) for p in os.path.split(self)]

    def makedirs(self):
        """Create the directory (and parents) unless it already exists."""
        if not self.exists:
            os.makedirs(self)

    def listdir(self):
        """Return the directory entries (thin wrapper over os.listdir)."""
        return os.listdir(self)

    def format(self, *args, **kwargs):
        """``str.format`` whose result is wrapped back into a Path."""
        return self.__class__(super(Path, self).format(*args, **kwargs))

    def __add__(self, other):
        # Concatenation keeps the Path type so chained joins still work.
        return self.__class__(super(Path, self).__add__(other))

    def __iadd__(self, other):
        return self.__add__(other)
def setup(app):
    """Register the plot2rst extension: build hook plus config defaults."""
    app.connect('builder-inited', generate_example_galleries)
    # (name, default) pairs; every value triggers a rebuild when changed.
    config_defaults = [
        ('plot2rst_paths', ('../examples', 'auto_examples')),
        ('plot2rst_rcparams', {}),
        ('plot2rst_default_thumb', None),
        ('plot2rst_thumb_shape', (250, 300)),
        ('plot2rst_plot_tag', 'PLOT2RST.current_figure'),
        ('plot2rst_index_name', 'index'),
    ]
    for name, default in config_defaults:
        app.add_config_value(name, default, True)
def generate_example_galleries(app):
    """Build every (examples -> rst) gallery listed in ``plot2rst_paths``."""
    config = app.builder.config
    source_root = Path(os.path.abspath(app.builder.srcdir))  # doc 'source' dir
    # A single (src, dest) tuple is normalized to a one-element list.
    if isinstance(config.plot2rst_paths, tuple):
        config.plot2rst_paths = [config.plot2rst_paths]
    for src, dest in config.plot2rst_paths:
        generate_examples_and_gallery(source_root.pjoin(Path(src)),
                                      source_root.pjoin(Path(dest)),
                                      config)
def generate_examples_and_gallery(example_dir, rst_dir, cfg):
    """Generate rst from examples and create gallery to showcase examples.

    Parameters
    ----------
    example_dir : Path
        Directory with the python example scripts (at most one level of
        sub-galleries is descended into).
    rst_dir : Path
        Destination directory for the generated rst files.
    cfg : config object
        Sphinx config object created by Sphinx.
    """
    if not example_dir.exists:
        print("No example directory found at", example_dir)
        return
    rst_dir.makedirs()

    # We create an index.rst with all examples.  ``open`` replaces the
    # Python-2-only ``file`` builtin, and the ``with`` block closes the
    # handle (the original only flushed it and leaked the descriptor).
    with open(rst_dir.pjoin('index' + cfg.source_suffix), 'w') as gallery_index:
        # Here we don't use an os.walk, but we recurse only twice: flat is
        # better than nested.
        write_gallery(gallery_index, example_dir, rst_dir, cfg)
        for d in sorted(example_dir.listdir()):
            example_sub = example_dir.pjoin(d)
            if example_sub.isdir:
                rst_sub = rst_dir.pjoin(d)
                rst_sub.makedirs()
                write_gallery(gallery_index, example_sub, rst_sub, cfg, depth=1)
        gallery_index.flush()
def write_gallery(gallery_index, src_dir, rst_dir, cfg, depth=0):
    """Generate the rst files for an example directory, i.e. gallery.

    Write rst files from python examples and add example links to gallery.

    Parameters
    ----------
    gallery_index : file
        Index file for plot gallery.
    src_dir : 'str'
        Source directory for python examples.
    rst_dir : 'str'
        Destination directory for rst files generated from python examples.
    cfg : config object
        Sphinx config object created by Sphinx.
    depth : int, optional
        Nesting depth of `src_dir` below the top-level example directory;
        used to build sub-directory links (0 means top level).
    """
    index_name = cfg.plot2rst_index_name + cfg.source_suffix
    gallery_template = src_dir.pjoin(index_name)
    if not os.path.exists(gallery_template):
        print(src_dir)
        print(80*'_')
        print('Example directory %s does not have a %s file'
                        % (src_dir, index_name))
        print('Skipping this directory')
        print(80*'_')
        return

    # `open` replaces the Python-2-only `file` builtin and the handle is
    # closed promptly instead of leaking until garbage collection.
    with open(gallery_template) as f:
        gallery_description = f.read()
    gallery_index.write('\n\n%s\n\n' % gallery_description)

    rst_dir.makedirs()
    # Examples with plots sort first (see `_plots_first`).
    examples = [fname for fname in sorted(src_dir.listdir(), key=_plots_first)
                if fname.endswith('py')]
    ex_names = [ex[:-3] for ex in examples]  # strip '.py' extension
    if depth == 0:
        sub_dir = Path('')
    else:
        sub_dir_list = src_dir.psplit()[-depth:]
        sub_dir = Path('/'.join(sub_dir_list) + '/')

    gallery_index.write(TOCTREE_TEMPLATE % (sub_dir + '\n '.join(ex_names)))

    for src_name in examples:
        try:
            write_example(src_name, src_dir, rst_dir, cfg)
        except Exception:
            # Report the failing example but keep processing the rest.
            print("Exception raised while running:")
            print("%s in %s" % (src_name, src_dir))
            print('~' * 60)
            traceback.print_exc()
            print('~' * 60)
            continue
        link_name = sub_dir.pjoin(src_name)
        link_name = link_name.replace(os.path.sep, '_')
        if link_name.startswith('._'):
            link_name = link_name[2:]

        info = {}
        info['thumb'] = sub_dir.pjoin('images/thumb', src_name[:-3] + '.png')
        info['source'] = sub_dir + src_name[:-3]
        info['link_name'] = link_name
        gallery_index.write(GALLERY_IMAGE_TEMPLATE % info)
def _plots_first(fname):
    """Sort key that ranks plotting examples ('plot*.py') before others."""
    is_plot_example = fname.startswith('plot') and fname.endswith('.py')
    # Non-plot files get a 'zz' prefix so they sort after every 'plot*' name.
    return fname if is_plot_example else 'zz' + fname
def write_example(src_name, src_dir, rst_dir, cfg):
    """Write rst file from a given python example.

    Parameters
    ----------
    src_name : str
        Name of example file.
    src_dir : 'str'
        Source directory for python examples.
    rst_dir : 'str'
        Destination directory for rst files generated from python examples.
    cfg : config object
        Sphinx config object created by Sphinx.
    """
    last_dir = src_dir.psplit()[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = Path('')
    else:
        last_dir += '_'

    src_path = src_dir.pjoin(src_name)
    example_file = rst_dir.pjoin(src_name)
    shutil.copyfile(src_path, example_file)

    image_dir = rst_dir.pjoin('images')
    thumb_dir = image_dir.pjoin('thumb')
    notebook_dir = rst_dir.pjoin('notebook')
    image_dir.makedirs()
    thumb_dir.makedirs()
    notebook_dir.makedirs()

    base_image_name = os.path.splitext(src_name)[0]
    image_path = image_dir.pjoin(base_image_name + '_{0}.png')

    basename, py_ext = os.path.splitext(src_name)
    rst_path = rst_dir.pjoin(basename + cfg.source_suffix)
    notebook_path = notebook_dir.pjoin(basename + '.ipynb')

    # Skip regeneration when images, rst and notebook are all up to date.
    if (_plots_are_current(src_path, image_path) and rst_path.exists
            and notebook_path.exists):
        return

    blocks = split_code_and_text_blocks(example_file)
    if blocks[0][2].startswith('#!'):
        blocks.pop(0)  # don't add shebang line to rst file.

    rst_link = '.. _example_%s:\n\n' % (last_dir + src_name)
    figure_list, rst = process_blocks(blocks, src_path, image_path, cfg)

    has_inline_plots = any(cfg.plot2rst_plot_tag in b[2] for b in blocks)
    if has_inline_plots:
        example_rst = ''.join([rst_link, rst])
    else:
        # print first block of text, display all plots, then display code.
        first_text_block = [b for b in blocks if b[0] == 'text'][0]
        label, (start, end), content = first_text_block
        figure_list = save_all_figures(image_path)
        rst_blocks = [IMAGE_TEMPLATE % f.lstrip('/') for f in figure_list]

        example_rst = rst_link
        example_rst += eval(content)
        example_rst += ''.join(rst_blocks)
        code_info = dict(src_name=src_name, code_start=end)
        example_rst += LITERALINCLUDE.format(**code_info)

    example_rst += CODE_LINK.format(src_name)
    ipnotebook_name = src_name.replace('.py', '.ipynb')
    ipnotebook_name = './notebook/' + ipnotebook_name
    example_rst += NOTEBOOK_LINK.format(ipnotebook_name)

    # `with` closes the rst file (the original only flushed and leaked it).
    with open(rst_path, 'w') as f:
        f.write(example_rst)

    thumb_path = thumb_dir.pjoin(src_name[:-3] + '.png')
    # Guard: examples that produce no figures used to crash on
    # `figure_list[0]`; they now fall through to the default thumbnail.
    if figure_list:
        first_image_file = image_dir.pjoin(figure_list[0].lstrip('/'))
        if first_image_file.exists:
            first_image = io.imread(first_image_file)
            save_thumbnail(first_image, thumb_path, cfg.plot2rst_thumb_shape)

    if not thumb_path.exists:
        if cfg.plot2rst_default_thumb is None:
            print("WARNING: No plots found and default thumbnail not defined.")
            print("Specify 'plot2rst_default_thumb' in Sphinx config file.")
        else:
            shutil.copy(cfg.plot2rst_default_thumb, thumb_path)

    # Export example to IPython notebook
    nb = Notebook()
    for (cell_type, _, content) in blocks:
        content = content.rstrip('\n')
        if cell_type == 'code':
            nb.add_cell(content, cell_type='code')
        else:
            # Strip docstring quotes and image directives before rendering
            # the text block to HTML for the notebook.
            content = content.replace('"""', '')
            content = '\n'.join([line for line in content.split('\n') if
                                 not line.startswith('.. image')])
            html = publish_parts(content, writer_name='html')['html_body']
            nb.add_cell(html, cell_type='markdown')
    with open(notebook_path, 'w') as f:
        f.write(nb.json())
def save_thumbnail(image, thumb_path, shape):
    """Save image as a thumbnail with the specified shape.

    The image is first resized to fit within the specified shape and then
    centered in an array of the specified shape before saving.
    """
    # Uniform scale so the resized image fits entirely inside `shape`.
    rescale = min(float(w_1) / w_2 for w_1, w_2 in zip(shape, image.shape))
    small_shape = (rescale * np.asarray(image.shape[:2])).astype(int)
    small_image = transform.resize(image, small_shape)

    # Extend canvas shape with the channel axis for color images.
    if len(image.shape) == 3:
        shape = shape + (image.shape[2],)
    # Fill the canvas with the dtype's maximum value (white background).
    background_value = dtype_range[small_image.dtype.type][1]
    thumb = background_value * np.ones(shape, dtype=small_image.dtype)

    # Center the resized image inside the thumbnail canvas.
    i = (shape[0] - small_shape[0]) // 2
    j = (shape[1] - small_shape[1]) // 2
    thumb[i:i+small_shape[0], j:j+small_shape[1]] = small_image
    io.imsave(thumb_path, thumb)
def _plots_are_current(src_path, image_path):
    """Return True when the first generated image is newer than the source."""
    first_image = Path(image_path.format(1))
    if not first_image.exists:
        return False
    return _mod_time(first_image) > _mod_time(src_path)
def _mod_time(file_path):
    """Return the last-modification time of *file_path* in seconds."""
    # os.path.getmtime is the stdlib shorthand for os.stat(...).st_mtime.
    return os.path.getmtime(file_path)
def split_code_and_text_blocks(source_file):
    """Return list with source file separated into code and text blocks.

    Returns
    -------
    blocks : list of (label, (start, end+1), content)
        List where each element is a tuple with the label ('text' or 'code'),
        the (start, end+1) line numbers, and content string of block.
    """
    block_edges, idx_first_text_block = get_block_edges(source_file)

    with open(source_file) as f:
        source_lines = f.readlines()

    # Every other block should be a text block
    idx_text_block = np.arange(idx_first_text_block, len(block_edges), 2)
    blocks = []
    # Consecutive edge pairs delimit one block each.
    slice_ranges = zip(block_edges[:-1], block_edges[1:])
    for i, (start, end) in enumerate(slice_ranges):
        block_label = 'text' if i in idx_text_block else 'code'
        # subtract 1 from indices b/c line numbers start at 1, not 0
        content = ''.join(source_lines[start-1:end-1])
        blocks.append((block_label, (start, end), content))
    return blocks
def get_block_edges(source_file):
    """Return starting line numbers of code and text blocks

    Returns
    -------
    block_edges : list of int
        Line number for the start of each block. Note the
    idx_first_text_block : {0 | 1}
        0 if first block is text then, else 1 (second block better be text).
    """
    block_edges = []
    with open(source_file) as f:
        token_iter = tokenize.generate_tokens(f.readline)
        for token_tuple in token_iter:
            t_id, t_str, (srow, scol), (erow, ecol), src_line = token_tuple
            # A module-level (column 0) string token is a text block.
            if (token.tok_name[t_id] == 'STRING' and scol == 0):
                # Add one point to line after text (for later slicing)
                block_edges.extend((srow, erow+1))
    idx_first_text_block = 0
    # when example doesn't start with text block.
    if not block_edges[0] == 1:
        block_edges.insert(0, 1)
        idx_first_text_block = 1
    # when example doesn't end with text block.
    # NOTE(review): `erow` is the end row of the last token seen in the loop
    # above; an empty file would leave it undefined (NameError) and a file
    # with no top-level strings makes block_edges[0] raise IndexError --
    # confirm examples always start with a module docstring.
    if not block_edges[-1] == erow: # iffy: I'm using end state of loop
        block_edges.append(erow)
    return block_edges, idx_first_text_block
def process_blocks(blocks, src_path, image_path, cfg):
    """Run source, save plots as images, and convert blocks to rst.

    Parameters
    ----------
    blocks : list of block tuples
        Code and text blocks from example. See `split_code_and_text_blocks`.
    src_path : str
        Path to example file.
    image_path : str
        Path where plots are saved (format string which accepts figure number).
    cfg : config object
        Sphinx config object created by Sphinx.

    Returns
    -------
    figure_list : list
        List of figure names saved by the example.
    rst_text : str
        Text with code wrapped code-block directives.
    """
    src_dir, src_name = src_path.psplit()
    # Only 'plot*' examples are executed; others are rendered without running.
    if not src_name.startswith('plot'):
        return [], ''

    # index of blocks which have inline plots
    inline_tag = cfg.plot2rst_plot_tag
    idx_inline_plot = [i for i, b in enumerate(blocks)
                       if inline_tag in b[2]]

    image_dir, image_fmt_str = image_path.psplit()
    figure_list = []
    # Reset matplotlib state so one example cannot leak rcParams or open
    # figures into the next.
    plt.rcdefaults()
    plt.rcParams.update(cfg.plot2rst_rcparams)
    plt.close('all')

    # All code blocks share one namespace so later blocks see earlier names.
    example_globals = {}
    rst_blocks = []
    fig_num = 1
    for i, (blabel, brange, bcontent) in enumerate(blocks):
        if blabel == 'code':
            # NOTE: executes example source -- only run trusted examples.
            exec(bcontent, example_globals)
            rst_blocks.append(codestr2rst(bcontent))
        else:
            if i in idx_inline_plot:
                # Save the currently active figure and substitute the tag
                # in the text with a link to the saved image.
                plt.savefig(image_path.format(fig_num))
                figure_name = image_fmt_str.format(fig_num)
                fig_num += 1
                figure_list.append(figure_name)
                figure_link = os.path.join('images', figure_name)
                bcontent = bcontent.replace(inline_tag, figure_link)
            rst_blocks.append(docstr2rst(bcontent))
    return figure_list, '\n'.join(rst_blocks)
def codestr2rst(codestr):
    """Return reStructuredText code block from code string"""
    # Tab-indent every line (including empty ones) under the directive.
    indented = '\n'.join('\t' + line for line in codestr.split('\n'))
    return ".. code-block:: python\n\n" + indented
def docstr2rst(docstr):
    """Return reStructuredText from docstring.

    `docstr` is the raw source of a module-level string (quotes included);
    evaluating it yields the text, and the raw source's trailing whitespace
    is re-appended so block spacing is preserved.

    NOTE: `eval` on file content is unsafe for untrusted input; it is kept
    because examples are trusted local files.
    """
    stripped = docstr.rstrip()
    # Slice from the end of the stripped text so `whitespace` is only the
    # trailing whitespace.  The original used the non-positive index
    # `len(stripped) - len(docstr)`, which is 0 when there is no trailing
    # whitespace, making `docstr[0:]` duplicate the whole raw docstring
    # into the output.
    whitespace = docstr[len(stripped):]
    return eval(docstr) + whitespace
def save_all_figures(image_path):
    """Save all matplotlib figures.

    Parameters
    ----------
    image_path : str
        Path where plots are saved (format string which accepts figure number).

    Returns
    -------
    figure_list : list of str
        File names (the format string's basename, filled in) of all saved
        figures.
    """
    figure_list = []
    image_dir, image_fmt_str = image_path.psplit()
    # Enumerate every live figure known to matplotlib's figure manager.
    fig_mngr = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
    for fig_num in (m.num for m in fig_mngr):
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        plt.figure(fig_num)
        plt.savefig(image_path.format(fig_num))
        figure_list.append(image_fmt_str.format(fig_num))
    return figure_list
| bsd-3-clause |
h2educ/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
# Ground-truth function and fixtures shared by the tests below.
f = lambda x: x * np.sin(x)
# Training inputs as a column vector, and a second set of query points.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a one-dimensional Gaussian Process model.
    # Check random start optimization.
    # Test the interpolating property.
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=random_start, verbose=False).fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
    # At training points the GP must interpolate exactly (zero MSE); at the
    # held-out points X2 the variance only has to stay bounded (atol=10).
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
                and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the interpolating property.
    b, kappa, e = 5., .5, .1
    # Quadratic ground-truth surface over two input dimensions.
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()

    # One length-scale bound per input dimension (anisotropic kernel).
    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))

    # The optimized hyperparameters must respect the given bounds
    # (up to floating-point epsilon).
    eps = np.finfo(gp.theta_.dtype).eps
    assert_true(np.all(gp.theta_ >= thetaL - eps))  # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU + eps))  # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the GP interpolation for 2D output
    b, kappa, e = 5., .5, .1
    # Scalar ground truth `g` duplicated into a two-column target via `f`.
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    # Interpolation must hold for both output columns simultaneously.
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
    # Mismatched sample counts (2 rows of X vs 3 targets) must raise.
    gp = GaussianProcess()
    gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
    # Repeat test_1d and test_2d for several built-in correlation
    # models specified as strings.
    all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
                'linear']

    for corr in all_corr:
        test_1d(regr='constant', corr=corr, random_start=random_start)
        test_2d(regr='constant', corr=corr, random_start=random_start)
        test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
    # Repeat test_1d and test_2d with given regression weights (beta0) for
    # different regression models (Ordinary Kriging).
    test_1d(regr='linear', beta0=[0., 0.5])
    test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])

    test_2d(regr='linear', beta0=[0., 0.5, 0.5])
    test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])

    test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
    test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
    # Interpolation must also hold when input/output normalization is off.
    gp = GaussianProcess(normalize=False).fit(X, y)
    y_pred = gp.predict(X)
    assert_true(np.allclose(y_pred, y))
def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the reduced likelihood function of the optimal theta.
    n_samples, n_features = 50, 3
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        # Monotone (up to float32 eps): more restarts can only do better.
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf
def test_mse_solving():
    # test the MSE estimate to be sane.
    # non-regression test for ignoring off-diagonals of feature covariance,
    # testing with nugget that renders covariance useless, only
    # using the mean function, with low effective rank of data
    gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                         thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                         optimizer='Welch', regr="linear", random_state=0)

    X, y = make_regression(n_informative=3, n_features=60, noise=50,
                           random_state=0, effective_rank=1)

    gp.fit(X, y)
    # The mean predictive variance must stay below a sane bound.
    assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
yonglehou/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt

# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D

#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll

from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)

print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)

#----------------------------------------------------------------------
# Plot result

fig = plt.figure()
try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(211, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except Exception:
    # Fall back to a 2D scatter when the 3D projection is unavailable.
    # (The original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; `except Exception` keeps those interruptible.)
    ax = fig.add_subplot(211)
    ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/example/rcnn/symdata/loader.py | 11 | 8759 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from symdata.anchor import AnchorGenerator, AnchorSampler
from symdata.image import imdecode, resize, transform, get_image, tensor_vstack
def load_test(filename, short, max_size, mean, std):
    """Load one image for inference.

    Returns (im_tensor, im_info, im_orig): the normalized NCHW batch-of-one
    tensor, the [height, width, scale] info tensor, and the RGB original.
    """
    # read and transform image
    im_orig = imdecode(filename)
    im, im_scale = resize(im_orig, short, max_size)
    height, width = im.shape[:2]
    im_info = mx.nd.array([height, width, im_scale])

    # transform into tensor and normalize
    im_tensor = transform(im, mean, std)

    # for 1-batch inference purpose, cannot use batchify (or nd.stack) to expand dims
    im_tensor = mx.nd.array(im_tensor).expand_dims(0)
    im_info = mx.nd.array(im_info).expand_dims(0)

    # transform cv2 BGR image to RGB for matplotlib
    im_orig = im_orig[:, :, (2, 1, 0)]
    return im_tensor, im_info, im_orig
def generate_batch(im_tensor, im_info):
    """return batch"""
    # Wrap the tensors in a DataBatch with matching provide_data shapes;
    # inference batches carry no labels.
    data = [im_tensor, im_info]
    data_shapes = [('data', im_tensor.shape), ('im_info', im_info.shape)]
    data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
    return data_batch
class TestLoader(mx.io.DataIter):
    """Data iterator for inference: yields image batches without labels."""

    def __init__(self, roidb, batch_size, short, max_size, mean, std):
        super(TestLoader, self).__init__()

        # save parameters as properties
        self._roidb = roidb
        self._batch_size = batch_size
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std

        # infer properties from roidb
        self._size = len(self._roidb)
        self._index = np.arange(self._size)

        # decide data and label names (only for training)
        self._data_name = ['data', 'im_info']
        self._label_name = None

        # status variable
        self._cur = 0
        self._data = None
        self._label = None

        # get first batch to fill in provide_data and provide_label
        self.next()
        self.reset()

    @property
    def provide_data(self):
        # Shapes come from the most recently produced batch.
        return [(k, v.shape) for k, v in zip(self._data_name, self._data)]

    @property
    def provide_label(self):
        return None

    def reset(self):
        self._cur = 0

    def iter_next(self):
        # Only full batches are produced; a trailing partial batch is dropped.
        return self._cur + self._batch_size <= self._size

    def next(self):
        if self.iter_next():
            data_batch = mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
                                         pad=self.getpad(), index=self.getindex(),
                                         provide_data=self.provide_data, provide_label=self.provide_label)
            self._cur += self._batch_size
            return data_batch
        else:
            raise StopIteration

    def getdata(self):
        # Load, resize and normalize each image in the batch, then pad-stack.
        indices = self.getindex()
        im_tensor, im_info = [], []
        for index in indices:
            roi_rec = self._roidb[index]
            b_im_tensor, b_im_info, _ = get_image(roi_rec, self._short, self._max_size, self._mean, self._std)
            im_tensor.append(b_im_tensor)
            im_info.append(b_im_info)
        im_tensor = mx.nd.array(tensor_vstack(im_tensor, pad=0))
        im_info = mx.nd.array(tensor_vstack(im_info, pad=0))
        self._data = im_tensor, im_info
        return self._data

    def getlabel(self):
        return None

    def getindex(self):
        cur_from = self._cur
        cur_to = min(cur_from + self._batch_size, self._size)
        return np.arange(cur_from, cur_to)

    def getpad(self):
        # NOTE(review): reads `self.batch_size` (inherited from DataIter),
        # not `self._batch_size` set above -- confirm the two agree.
        return max(self._cur + self.batch_size - self._size, 0)
class AnchorLoader(mx.io.DataIter):
    """Training iterator yielding images plus RPN anchor targets.

    Labels (cls label, bbox target, bbox weight) are computed on the fly by
    assigning the generated anchors against each image's ground-truth boxes.
    """

    def __init__(self, roidb, batch_size, short, max_size, mean, std,
                 feat_sym, anchor_generator: AnchorGenerator, anchor_sampler: AnchorSampler,
                 shuffle=False):
        super(AnchorLoader, self).__init__()

        # save parameters as properties
        self._roidb = roidb
        self._batch_size = batch_size
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std
        # symbol of the feature map, used to infer the anchor grid size
        self._feat_sym = feat_sym
        self._ag = anchor_generator
        self._as = anchor_sampler
        self._shuffle = shuffle

        # infer properties from roidb
        self._size = len(roidb)
        self._index = np.arange(self._size)

        # decide data and label names
        self._data_name = ['data', 'im_info', 'gt_boxes']
        self._label_name = ['label', 'bbox_target', 'bbox_weight']

        # status variable
        self._cur = 0
        self._data = None
        self._label = None

        # get first batch to fill in provide_data and provide_label
        self.next()
        self.reset()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self._data_name, self._data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self._label_name, self._label)]

    def reset(self):
        self._cur = 0
        if self._shuffle:
            np.random.shuffle(self._index)

    def iter_next(self):
        # Only full batches are produced; a trailing partial batch is dropped.
        return self._cur + self._batch_size <= self._size

    def next(self):
        if self.iter_next():
            data_batch = mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
                                         pad=self.getpad(), index=self.getindex(),
                                         provide_data=self.provide_data, provide_label=self.provide_label)
            self._cur += self._batch_size
            return data_batch
        else:
            raise StopIteration

    def getdata(self):
        # Load each image with its ground-truth boxes, then pad-stack the
        # batch (gt_boxes padded with -1 so real boxes can be told apart).
        indices = self.getindex()
        im_tensor, im_info, gt_boxes = [], [], []
        for index in indices:
            roi_rec = self._roidb[index]
            b_im_tensor, b_im_info, b_gt_boxes = get_image(roi_rec, self._short, self._max_size, self._mean, self._std)
            im_tensor.append(b_im_tensor)
            im_info.append(b_im_info)
            gt_boxes.append(b_gt_boxes)
        im_tensor = mx.nd.array(tensor_vstack(im_tensor, pad=0))
        im_info = mx.nd.array(tensor_vstack(im_info, pad=0))
        gt_boxes = mx.nd.array(tensor_vstack(gt_boxes, pad=-1))
        self._data = im_tensor, im_info, gt_boxes
        return self._data

    def getlabel(self):
        im_tensor, im_info, gt_boxes = self._data

        # all stacked image share same anchors
        _, out_shape, _ = self._feat_sym.infer_shape(data=im_tensor.shape)
        feat_height, feat_width = out_shape[0][-2:]
        anchors = self._ag.generate(feat_height, feat_width)

        # assign anchor according to their real size encoded in im_info
        label, bbox_target, bbox_weight = [], [], []
        for batch_ind in range(im_info.shape[0]):
            b_im_info = im_info[batch_ind].asnumpy()
            b_gt_boxes = gt_boxes[batch_ind].asnumpy()
            b_im_height, b_im_width = b_im_info[:2]

            b_label, b_bbox_target, b_bbox_weight = self._as.assign(anchors, b_gt_boxes, b_im_height, b_im_width)
            # Reorder from (H, W, A*x) to channel-first layout expected by
            # the RPN heads; labels are additionally flattened.
            b_label = b_label.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1)).flatten()
            b_bbox_target = b_bbox_target.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1))
            b_bbox_weight = b_bbox_weight.reshape((feat_height, feat_width, -1)).transpose((2, 0, 1))

            label.append(b_label)
            bbox_target.append(b_bbox_target)
            bbox_weight.append(b_bbox_weight)

        # Labels padded with -1 (ignore); targets/weights padded with 0.
        label = mx.nd.array(tensor_vstack(label, pad=-1))
        bbox_target = mx.nd.array(tensor_vstack(bbox_target, pad=0))
        bbox_weight = mx.nd.array(tensor_vstack(bbox_weight, pad=0))
        self._label = label, bbox_target, bbox_weight
        return self._label

    def getindex(self):
        cur_from = self._cur
        cur_to = min(cur_from + self._batch_size, self._size)
        return np.arange(cur_from, cur_to)

    def getpad(self):
        # NOTE(review): reads `self.batch_size` (inherited from DataIter),
        # not `self._batch_size` set above -- confirm the two agree.
        return max(self._cur + self.batch_size - self._size, 0)
| apache-2.0 |
BV-DR/foamBazar | pythonScripts/oldStuff/fsTemplate2D.py | 1 | 13549 | #!/usr/bin/env python
#########################################################################
# Filename: fsTemplate2D.py #
# Date: 2018-May-14 #
# Version: 1. #
# Author: Alexis Benhamou #
# Org.: Bureau Veritas, (HO, France) #
# Email: alexis.benhamou@bureauveritas.com #
#########################################################################
import re
import os, sys
import shutil
import time, datetime
import math as mt
import numpy as np
import pandas as pd
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from fsTools import *
from subprocess import call, Popen
from scipy import interpolate as interp
from inputFiles.fvSchemes import FvSchemes
from inputFiles.fvSolution import FvSolution
from inputFiles.controlDict import ControlDict
from inputFiles.decomposeParDict import DecomposeParDict
from inputFiles.dynamicMeshDict import DynamicMeshDict
from inputFiles.waveProperties import WaveProperties, RelaxZone, WaveCondition
from inputFiles.boundaryCondition import writeAllBoundaries
from inputFiles.turbulenceProperties import writeTurbulenceProperties
from inputFiles.transportProperties import TransportProperties
from inputFiles.gravity import Gravity
def setBoundaries(param):
    """Rewrite boundary patch types in constant/polyMesh/boundary.

    domainX0/domainX1 patches are set to 'empty' and, when symmetry is
    requested, domainY0 becomes a 'symmetryPlane'.
    """
    mycase = os.path.join(param.case,'grid_'+str(param.gridLevel))
    boundfile = os.path.join(mycase,"constant","polyMesh","boundary")
    boundDict = ParsedParameterFile(boundfile,boundaryDict=True)
    # The parsed boundary list alternates [name, attr-dict, name, attr-dict, ...].
    nbound = int(len(boundDict)/2)
    for i in range(nbound):
        if boundDict[2*i] in ['domainX0','domainX1']:
            boundDict[2*i+1]['type'] = 'empty'
        elif param.symmetry and (boundDict[2*i] in ['domainY0']):
            boundDict[2*i+1]['type'] = 'symmetryPlane'
    boundDict.writeFile()
def copyMesh(param,overwrite=False):
    """Copy a previously generated mesh into the case folder.

    Prompts before deleting an existing case unless `overwrite` is True.
    The mesh time folder is chosen by param.meshTime: 'latestTime',
    'constant', or an explicit folder name.
    """
    mycase = os.path.join(param.case,'grid_'+str(param.gridLevel))
    mymesh = os.path.join(param.meshDir,param.case,'grid_'+str(param.gridLevel))

    if os.path.exists(mycase):
        if overwrite:
            shutil.rmtree(mycase)
        else:
            valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
            res = input('Case "{}" already exists, do you want to overwrite ? (y/n) '.format(mycase)).lower()
            if valid.get(res,False):
                shutil.rmtree(mycase)
            else:
                print('Exiting')
                # NOTE(review): os._exit skips cleanup handlers; sys.exit
                # would be gentler -- confirm the hard exit is intended.
                os._exit(1)
    os.makedirs(mycase)

    consfolder = os.path.join(mycase,"constant")
    if not os.path.exists(consfolder): os.makedirs(consfolder)

    # Resolve which time folder of the source mesh to copy.
    if param.meshTime=='latestTime':
        timeFolders = getFoamTimeFolders(mymesh)
        meshTimeFolder = timeFolders[-1]
    elif param.meshTime=='constant':
        meshTimeFolder = 'constant'
    else:
        meshTimeFolder = param.meshTime

    print('Copy mesh from folder '+meshTimeFolder)
    shutil.copytree(mymesh+r'/'+meshTimeFolder+r'/polyMesh',mycase+r'/constant/polyMesh')
    shutil.copytree(mymesh+r'/constant/triSurface',mycase+r'/constant/triSurface')
    setBoundaries(param)
def create2DCase(param,runScript=True):
mycase = os.path.join(param.case,'grid_'+str(param.gridLevel))
print('Create system folder input files')
sysfolder = os.path.join(mycase,"system")
if not os.path.exists(sysfolder): os.makedirs(sysfolder)
#controlDict
if param.outputForces:
if len(param.hullPatch)>0: forcesPatch = param.hullPatch
else: forcesPatch = param.case
else: forcesPatch = None
controlDict = ControlDict( case = mycase,
startFrom = param.startTime,
endTime = param.endTime,
deltaT = param.timeStep,
writeInterval = param.writeInterval,
purgeWrite = param.purgeWrite,
writePrecision = 15,
forcesPatch = forcesPatch,
rhoWater = 1025,
OFversion = param.OFversion,
version = "foamStar" )
controlDict.writeFile()
#fvSchemes
fvSchemes = FvSchemes( case = mycase,
simType = param.scheme,
orthogonalCorrection = "implicit",
version = "foamStar" )
fvSchemes.writeFile()
#fvSolution
fvSolution = FvSolution( case = mycase,
useEuler = param.scheme=='Euler',
version = "foamStar" )
fvSolution.writeFile()
#decomposeParDict
decomposeParDict = DecomposeParDict( case = mycase,
nProcs = param.nProcs )
decomposeParDict.writeFile()
print('Create constant folder input files')
#waveProperties
filename = os.path.join(mycase,'constant','waveProperties')
waveCond = WaveCondition( waveType = param.wave )
relaxZones = []
if param.sideRelaxZone is not None:
bBox = findCFDBoundingBox(mycase,False)
if param.sideRelaxZone>0:
relaxSide = RelaxZone( "side" , relax=True, waveCondition=waveCond, origin=[0., bBox[4], 0.], orientation = [ 0. , -1. , 0.], bound=param.sideRelaxZone)
else:
relaxSide = RelaxZone( "side" , relax=True, waveCondition=waveCond, origin=[0., bBox[4], 0.], orientation = [ 0. , -1. , 0.], bound=0.5*bBox[4])
relaxZones += [relaxSide]
waveProperties = WaveProperties( filename,
initWaveCondition = waveCond,
relaxZones = relaxZones,
version = "foamStar" )
waveProperties.writeFile()
#cell.Set
if param.sideRelaxZone is not None:
filename = os.path.join(mycase,'cell.Set')
waveProperties.writeBlendingZoneBatch(filename)
#dynamicMeshDict
shutil.copyfile(param.dispSignal,os.path.join(mycase,"dispSignal.dat"))
dynamicMeshDict = DynamicMeshDict( case = mycase,
type = 'solid',
dispFile = "dispSignal.dat",
OFversion = param.OFversion,
version = "foamStar")
dynamicMeshDict.writeFile()
#g
gravity = Gravity( case = mycase, g = param.gravity )
gravity.writeFile()
#turbulenceProperties, RASProperties
writeTurbulenceProperties( mycase , "laminar" )
#transportProperties
transportProperties = TransportProperties( case = mycase,
rhoWater = 1025,
version = "foamStar")
transportProperties.writeFile()
print('Create 0 folder input files')
zerofolder = os.path.join(mycase,"0")
orgfolder = os.path.join(mycase,"0","org")
if not os.path.exists(zerofolder): os.makedirs(zerofolder)
if not os.path.exists(orgfolder): os.makedirs(orgfolder)
#alpha water, p_rgh, U, pointDisplacement
writeAllBoundaries( case = mycase,
case2D = True,
symmetryPlane = param.symmetry,
struct = '"'+param.case+'|wetSurf"',
version = "foamStar" )
#Allrun
print('Create run scripts')
arun = os.path.join(mycase,'Allrun')
with open(arun,'w') as f:
f.write('#! /bin/bash\n')
f.write('set -x\n\n')
f.write('(\n')
# f.write(' cp -r ../../snap/Grid1/constant/polyMesh/ constant/\n')
# f.write(' cp constant/org/boundary constant/polyMesh/\n')
if param.sideRelaxZone is not None:
f.write(' setSet -batch cell.Set\n')
f.write(' setsToZones -noFlipMap\n')
f.write(' cp -rf 0/org/* 0/\n')
if param.translateLength!=0.:
f.write(' transformPoints -translate "( 0. 0. {})"\n'.format(param.translateLength))
f.write(' decomposePar -cellDist\n')
f.write(' mpirun -np {:d} initWaveField -parallel\n'.format(param.nProcs))
f.write(') 2>&1 | tee log.init\n')
os.chmod(arun, 0o755)
#Allclean
aclean = os.path.join(mycase,'Allclean')
with open(aclean,'w') as f:
f.write('#! /bin/bash\n')
f.write('cd ${0%/*} || exit 1 # run from this directory\n\n')
f.write('function clean_log()\n')
f.write('{\n')
f.write(' rm -fr log.*\n')
f.write('}\n\n')
f.write('function clean_mesh()\n')
f.write('{\n')
f.write(' rm -fr background.msh VTK\n')
f.write(' rm -fr 0/{ccx,ccy,ccz,*Level,polyMesh/cellMap}*\n')
f.write(' rm -fr constant/modeshapes/{cfd,mapper,modalInfo}_body\n')
f.write(' rm -fr constant/extendedFeatureEdgeMesh/\n')
f.write(' rm -fr constant/polyMesh/{sets,*Level*,*level*,*Index*,*History*}\n')
f.write('}\n\n')
f.write('function clean_parallel_mesh()\n')
f.write('{\n')
f.write(' rm -fr processor*\n')
f.write('}\n\n')
f.write('function clean_0()\n')
f.write('{\n')
f.write(' rm -fr 0/*.gz\n')
f.write('}\n\n')
f.write('eval clean_log\n')
f.write('eval clean_mesh\n')
f.write('eval clean_parallel_mesh\n')
f.write('eval clean_0\n')
os.chmod(aclean, 0o755)
#run Allrun script
if runScript:
p = Popen(['./Allclean'], cwd=mycase)
p.wait()
p = Popen(['./Allrun'], cwd=mycase)
p.wait()
#create file for Paraview
open(os.path.join(mycase,'a.foam'), 'a').close()
#run.sh
run = os.path.join(mycase,'run.sh')
with open(run,'w') as f:
f.write('#!/bin/bash -l\n')
f.write('#SBATCH -J {}\n\n'.format(mycase))
f.write('# 5 hour wall-clock\n')
f.write('#SBATCH --account I1608251\n')
f.write('#SBATCH -t 3-00:00:00\n')
f.write('#SBATCH -n {:d}\n'.format(param.nProcs))
f.write('#SBATCH -o log.run-%j\n\n')
f.write('module load gcc/4.9.3 openmpi/1.8.4-gcc lapack/3.6.1/gcc/4.9.3\n')
f.write('export FOAM_INST_DIR=/data/I1608251/OpenFOAM;\n')
if param.OFversion==2: f.write('source /data/I1608251/OpenFOAM/OpenFOAM-2.4.x/etc/bashrc;\n')
elif param.OFversion==3: f.write('source /data/I1608251/OpenFOAM/OpenFOAM-3.0.x/etc/bashrc;\n')
elif param.OFversion==5: f.write('source /data/I1608251/OpenFOAM/OpenFOAM-5.x/etc/bashrc;\n')
f.write('export LC_ALL=C\n\n')
f.write('mpirun {} -parallel\n'.format(param.solver))
def tarCase(param):
    """Archive the case directory ``<case>/grid_<gridLevel>`` as a gzipped tar.

    The archive is written to ``<case>/<case>_g<gridLevel>.tar.gz``.

    param: object with at least ``case`` (str) and ``gridLevel`` attributes.
    """
    mycase = os.path.join(param.case, 'grid_' + str(param.gridLevel))
    archive = os.path.join(param.case, param.case + '_g' + str(param.gridLevel)) + '.tar.gz'
    print('Creating archive {}.tar.gz'.format(mycase))
    # Pass an argument list with shell=False (the default) instead of an
    # interpolated shell string, so special characters in the case name
    # cannot be interpreted by the shell.
    subprocess.call(['tar', 'czf', archive, mycase])
# *** Main execution starts here ***********************************************
# Default settings; user-supplied dictionaries override individual entries.
DEFAULT_PARAM = {
    'case': 'newCase',
    'meshDir': 'mesh',
    'meshTime': 'constant',
    'gridLevel': [1],
    'symmetry': False,
    'outputForces': False,
    'hullPatch': '',
    'startTime': 'latestTime',
    'endTime': 10,
    'timeStep': 0.01,
    'writeInterval': 1,
    'purgeWrite': 0,
    'scheme': 'Euler',
    'nProcs': 4,
    'nOuterCorrectors': 5,
    'wave': "noWaves",
    'waveH': 0.0,
    'waveT': 0.0,
    'velocity': 0.0,
    'depth': 100.,
    'sideRelaxZone': None,
    'dispSignal': None,
    'solver': 'foamStar',
    'OFversion': 3,
    'translateLength': 0.0,
    'gravity': 9.81,
}

# Driver options: overwrite an existing mesh copy, run the generated
# Allclean/Allrun scripts, and archive the finished case.
DEFAULT_ARGS = {
    'overwrite': False,
    'runScript': True,
    'tar': False,
}
class Struct:
    """Lightweight attribute container: ``Struct(a=1).a == 1``."""

    def __init__(self, **entries):
        # Expose every keyword argument as an instance attribute.
        for key, value in entries.items():
            setattr(self, key, value)
def fsTemplate2D(userParam={}, userArgs={}):
    """Build (and optionally run/archive) a 2D foamStar case from templates.

    userParam: dict of overrides for DEFAULT_PARAM (case set-up values).
    userArgs: dict of overrides for DEFAULT_ARGS (driver options).
    """
    startTime = time.time()
    # Work on copies: the original code updated the module-level DEFAULT_*
    # dicts in place, so overrides from one call leaked into the next.
    param = dict(DEFAULT_PARAM)
    param.update(userParam)
    param = Struct(**param)
    arg = dict(DEFAULT_ARGS)
    arg.update(userArgs)
    arg = Struct(**arg)
    copyMesh(param, overwrite=arg.overwrite)
    create2DCase(param, runScript=arg.runScript)
    if arg.tar:
        tarCase(param)
    endTime = time.time()
    print('Mesh generation completed in ' + str(datetime.timedelta(seconds=(endTime - startTime))))
| gpl-3.0 |
deisi/SFG2D | sfg2d/widgets.py | 1 | 66147 | import os
from glob import glob
from collections import Counter
import numpy as np
from json import dump, load
from traitlets import TraitError
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.gridspec as gridspec
from traitlets import validate
from ipywidgets import IntRangeSlider
from .core import SfgRecord, concatenate_list_of_SfgRecords
from sfg2d.utils.config import CONFIG
X_PIXEL_INDEX = CONFIG['X_PIXEL_INDEX']
Y_PIXEL_INDEX = CONFIG['Y_PIXEL_INDEX']
SPEC_INDEX = CONFIG['SPEC_INDEX']
FRAME_AXIS_INDEX = CONFIG['FRAME_AXIS_INDEX']
PP_INDEX = CONFIG['PP_INDEX']
PIXEL = CONFIG['PIXEL']
debug = 0
class WidgetBase():
"""A Base class for my widgets.
Uses SfgRecord object as data container.
Consists out of several ipywidgets.
Plots are rendered using matplotlib.
Define any ipwidget you need within this class, within the
*WidgetBase._init_widget* function. Default or context
dependet options of the widgets can be set during the
*WidgetBase._conf_widget_with_data* function.
The observers of the widgets are set within the *_init_observer*
function, or if it is an figure updating widget within the
*_init_figure_observers* function.
If an observer is defined, also define an unobserver in the
*_unobserver* function.
"""
def __init__(self, data=SfgRecord(), fig=None, ax=None,
central_wl=674, vis_wl=800, figsize=None):
# SfgRecord obj holding the data.
self.data = data
# 4 dim numpy array representing the baseline
# Internal objects
# Figure to draw on
self._fig = fig
# Central wavelength of the camera
# Size of the figure
self._figsize = figsize
# List of widgets that update the figure
self._figure_widgets = []
# Buffer to save autoscale values with.
self._autoscale_buffer = [None, None]
self._autoscale_buffer_2 = [None, None]
# Buffer to save x_rois upon switching data.
self._rois_x_pixel_buffer = [slice(None, None)]
# Buffer unpumped and pumped data throughout switching data files
self._unpumped_index_buffer = 0
self._pumped_index_buffer = 1
# List of widgets to display
self.children = []
# Setup all widgets
self._init_widget()
def __call__(self):
"""Use call to actually Render the widgets on the notebook."""
from IPython.display import display
self._conf_widget_with_data()
self._init_observer()
self._init_figure_observers()
self._update_figure()
display(self.children)
self.fig
def _init_widget(self):
"""Init all widgets.
Add widgets within this function. If possible you can give default
properties to the widgets already within this function. Also use
this function to combine many widgets into e.g. boxes
"""
import ipywidgets as wi
# ## Any widget that we need at some point can be added here.
# Widget to enter a folder path as string
self.wTextFolder = wi.Text(
layout=wi.Layout(width='90%'),
)
# Selection dialogue to select data from a list of files
self.wSelectFile = wi.SelectMultiple(
layout=wi.Layout(width='41%'),
)
# Checkbox to toggle the visibility of the baseline data
self.wCheckShowBase = wi.Checkbox(
description='Baseline',
value=False,
)
# Checkbox to toggel the visiblitiy of the norm
self.wCheckShowNorm = wi.Checkbox(
description='Norm',
value=False
)
# Checkbox to toggle visibility of bleach
self.wCheckShowBleach = wi.Checkbox(
description='Bleach',
value=False,
)
# Dropdown to Choose Type of Bleach
self.wDropdownBleachOpt = wi.Dropdown(
description='Opt:',
options=['rel', 'abs'],
value='rel',
)
# Dropdown to Choose property of bleach
self.wDropdownBleachProp = wi.Dropdown(
description='Prop:',
options=['rawData', 'basesubed', 'normalized'],
value='basesubed',
)
# Toggle to show Trace of Bleach
self.wCheckShowTracesBleach = wi.Checkbox(
description='Bleach',
value=False,
)
self.wCheckShowTracesRawData = wi.Checkbox(
description='Raw',
value=True,
)
self.wCheckShowTracesBasesubed = wi.Checkbox(
description='Basesubed',
value=False,
)
self.wCheckShowTracesNormalized = wi.Checkbox(
description='Normalized',
value=False,
)
# Region slice to select index for zero_time_subtraction
self.wRangeZeroTime = IntRangeSliderGap(
description="Zero Time",
value=(0, 1), continuous_update=False,
)
# Snap pixel roi.
self.wSnapXRoi = wi.Button(
description="Snap X Region"
)
# Checkbox to toggle the zero_time suntraction of bleach data
self.wCheckShowZeroTimeSubtraction = wi.Checkbox(
description='Sub Zero Time',
value=False,
)
# Slider to select the width of the smoothing kernel
self.wIntSliderSmooth = wi.IntSlider(
continuous_update=False, description="Smooth",
min=1, max=19, step=2,
)
# Slider to select smoothing of baseline
# TODO Is not used yet
self.wIntSliderSmoothBase = wi.IntSlider(
continuous_update=False, description="Smooth",
min=self.wIntSliderSmooth.min,
max=self.wIntSliderSmooth.max,
step=2, value=1,
)
# Checkbox to toggle the Autoscale functionality of matplotlib
self.wCheckAutoscale = wi.Checkbox(
description="Autoscale",
value=True,
)
# Checkbox to toggle the Autoscale functionality of matplotlib
self.wCheckAutoscaleTrace = wi.Checkbox(
description="Autoscale Trace",
value=True,
)
# Slider to select the visible y-pixel/spectra range
self.wRangeSliderPixelY = IntRangeSliderGap(
continuous_update=False, description="Spectra Region"
)
# Slider to select spectra step size.
self.wIntTextPixelYStep = wi.BoundedIntText(
description='Spectra Stepsize', value=1, min=1,
layout=wi.Layout(width='180px',),
)
self.wCheckSpectraMean = wi.Checkbox(
description='Spectra Mean',
value=False
)
self.wDropdownSpectraMode = wi.Dropdown(
description='Spectra Mode',
options=['Index', 'Region'],
value='Region',
layout=wi.Layout(width='180px',),
)
# Slider to select the overall visible x-pixel range
self.wRangeSliderPixelX = IntRangeSliderGap(
continuous_update=False, description="X Region",
max=PIXEL, value=(0, PIXEL),
)
# Slider to select the x-pixel range used within traces
self.wRangeSliderTracePixelX = IntRangeSliderGap(
continuous_update=False, description="Trace Region",
max=PIXEL, value=(int(PIXEL*0.40), int(PIXEL*0.6)),
)
# Textbox to enter central wavelength of the camera in nm
self.wTextCentralWl = wi.FloatText(
description='Central Wl', value=self.data.central_wl,
layout=wi.Layout(
width='180px',
),
)
# Dropdown menu to select x-axis calibration.
self.wDropdownCalib = wi.Dropdown(
description='x-axis', options=['pixel', 'wavelength', 'wavenumber'],
layout=self.wTextCentralWl.layout,
)
# Textbox to enter the wavelength of the upconversion photon
# in nm.
self.wTextVisWl = wi.FloatText(
description='Vis Wl', value=self.data.vis_wl,
layout=self.wTextCentralWl.layout
)
# Slider to select visible pp-delay spectrum
self.wSliderPPDelay = wi.IntSlider(
description="Delay Index", continuous_update=False,
)
self.wRangeSliderPPDelay = IntRangeSliderGap(
continuous_update=False, description="Delay Region",
)
self.wCheckDelayMedian = wi.Checkbox(
description='Delay Median', value=False, disabled=False
)
# Dropdown to choose how Baseline or IR data gets send.
self.wDropdownDelayMode = wi.Dropdown(
description="Delay Mode", value="Index",
options=["Index", "Region"],
layout=wi.Layout(width='180px',)
)
# Slider to select range of frames used for median calculation.
self.wSliderFrame = wi.IntSlider(
description='Frame Index', continuous_update=False
)
self.wRangeSliderFrame = IntRangeSliderGap(
continuous_update=False, description="Frame Region"
)
# Checkbox to toggle the frame wise calculation of a median spectrum.
self.wCheckFrameMedian = wi.Checkbox(
description='Frame Median',
)
# Dropdown to choos how Baseline and IR data gest send
self.wDropdownFrameMode = wi.Dropdown(
description="Frame Mode", value="Index",
options=["Index", "Region"],
layout=wi.Layout(width='180px',)
)
# Slider to select frames for median calculation.
self.wSliderFrame = wi.IntSlider(
description='Frame', continuous_update=False
)
# Textbox to enter an additional constant offset to the baseline.
self.wTextBaselineOffset = wi.FloatText(
description='Offset', value=0,
layout=wi.Layout(width='180px'),
)
# Textbox to enter the index of the pumped spectrum.
self.wIntTextPumped = wi.BoundedIntText(
value=0,
min=0,
max=400, # Number of spectra/ypixels
description='Pumped',
layout=wi.Layout(width='180px'),
)
# Textbox to enter the index of the unpumped spectrum.
self.wIntTextUnpumped = wi.BoundedIntText(
value=1,
min=0,
max=400,
description='Unpumped',
layout=self.wIntTextPumped.layout,
)
# Checkbox to toggle visibility of Raw Spectra.
self.wCheckShowRawData = wi.Checkbox(
description='RawData',
value=True,
)
# Checkbox to toggle visibility of Basesubed.
self.wCheckShowBasesubed = wi.Checkbox(
description='Basesubed',
value=False,
)
# Checkbox to toggle visibility of Normalized.
self.wCheckShowNormalized = wi.Checkbox(
description='Normalized',
value=False,
)
# Dropdown to toggle view of the summed spectra
self.wDropShowTrace = wi.Dropdown(
options=["Raw", "Normalized", "Bleach"],
description='Trace',
value="Raw",
layout=self.wTextCentralWl.layout,
)
self.wTextSaveRecord = wi.Text(
description="File"
)
self.wButtonSaveRecord = wi.Button(
description="Save Record"
)
# ### Aligning boxers ###
self._data_box = wi.VBox([
wi.HBox([
wi.Label("Folder"),
self.wTextFolder,
]),
wi.HBox([
wi.Label('File'),
self.wSelectFile,
]),
])
self._signal_box = wi.VBox([
wi.HBox([
self.wSliderPPDelay,
self.wCheckDelayMedian,
self.wRangeSliderPPDelay,
self.wDropdownDelayMode,
]),
wi.HBox([
self.wSliderFrame,
self.wCheckFrameMedian,
self.wRangeSliderFrame,
self.wDropdownFrameMode,
]),
wi.HBox([
self.wRangeSliderPixelY,
self.wCheckSpectraMean,
self.wIntTextPixelYStep,
self.wRangeSliderPixelX,
]),
wi.HBox([
self.wIntSliderSmooth,
self.wRangeSliderTracePixelX,
])
])
self._calib_box = wi.HBox([
self.wDropdownCalib,
self.wTextCentralWl,
self.wTextVisWl,
self.wCheckAutoscale,
self.wCheckAutoscaleTrace,
])
self._save_record_box = wi.HBox([
self.wTextSaveRecord,
self.wButtonSaveRecord,
])
# List of widgets that update the figure on value change
self._figure_widgets = [
self.wSelectFile,
self.wSliderPPDelay,
self.wRangeSliderPPDelay,
self.wSliderFrame,
self.wCheckDelayMedian,
self.wRangeSliderFrame,
self.wRangeSliderPixelY,
self.wIntTextPixelYStep,
self.wRangeSliderPixelX,
self.wRangeSliderTracePixelX,
self.wCheckFrameMedian,
self.wTextVisWl,
self.wTextCentralWl,
self.wCheckAutoscale,
self.wDropdownCalib,
self.wCheckAutoscaleTrace,
self.wCheckShowNorm,
self.wIntSliderSmooth,
self.wCheckShowBase,
self.wCheckShowBleach,
self.wDropdownBleachOpt,
self.wDropdownBleachProp,
self.wCheckShowRawData,
self.wCheckShowBasesubed,
self.wCheckShowNormalized,
self.wCheckShowTracesBleach,
self.wCheckShowTracesRawData,
self.wCheckShowTracesBasesubed,
self.wCheckShowTracesNormalized,
self.wCheckSpectraMean,
self.wDropdownSpectraMode,
self.wDropdownDelayMode,
self.wDropdownFrameMode,
self.wDropShowTrace,
self.wTextBaselineOffset,
self.wIntTextPumped,
self.wIntTextUnpumped,
self.wCheckShowZeroTimeSubtraction,
self.wRangeZeroTime,
]
# Upon saving the gui state these widgets get saved
self._save_widgets = {
'folder': self.wTextFolder,
'file': self.wSelectFile,
'showBaseline': self.wCheckShowBase,
'checkDelayMedian': self.wCheckDelayMedian,
'showBleach': self.wCheckShowBleach,
'bleachOpt': self.wDropdownBleachOpt,
'bleachProp': self.wDropdownBleachProp,
'showTracesBleach': self.wCheckShowTracesBleach,
'showTracesRawData': self.wCheckShowTracesRawData,
'showTracesBasesubed': self.wCheckShowTracesBasesubed,
'showTracesNormalized': self.wCheckShowTracesNormalized,
'delayMode': self.wDropdownDelayMode,
'frameMode': self.wDropdownFrameMode,
'smoothSlider': self.wIntSliderSmooth,
'smoothBase': self.wIntSliderSmoothBase,
'autoscale': self.wCheckAutoscale,
'autoscaleTrace': self.wCheckAutoscaleTrace,
'pixelY': self.wRangeSliderPixelY,
'pixelY_step': self.wIntTextPixelYStep,
'pixelXpixel': self.wRangeSliderPixelX,
'tracePixelX': self.wRangeSliderTracePixelX,
'centralWl': self.wTextCentralWl,
'calib': self.wDropdownCalib,
'visWl': self.wTextVisWl,
'showNorm': self.wCheckShowNorm,
'pp_delay_slice': self.wRangeSliderPPDelay,
'frame_region': self.wRangeSliderFrame,
'frame_index': self.wSliderFrame,
'frameMedian': self.wCheckFrameMedian,
'frame': self.wSliderFrame,
'baselineOffset': self.wTextBaselineOffset,
'pumped': self.wIntTextPumped,
'unpumped': self.wIntTextUnpumped,
'bleachZeroTimeSubtraction': self.wCheckShowZeroTimeSubtraction,
'showTrace': self.wDropShowTrace,
'spectraMean': self.wCheckSpectraMean,
'spectraMode': self.wDropdownSpectraMode,
'showRawData': self.wCheckShowRawData,
'showBasesubed': self.wCheckShowBasesubed,
'showNormalized': self.wCheckShowNormalized,
'zeroTimeSelec': self.wRangeZeroTime,
'saveRecord': self.wTextSaveRecord,
}
def _conf_widget_with_data(self):
"""Set all widget options and default values according to data.
This uses the data to set the state of the widget. Thus one calles
it usually after loading new data. During operation of the widget
this is usually not called, because then the widget updates the data.
"""
def _set_range_slider_options(slider, record_data_index):
"""Set options of a gaped range slider.
slider: The slider to set the options of,
record_data_index: Index position of the property to set.
"""
slider.max = self.data.rawData.shape[record_data_index]
if self.data.rawData.shape[record_data_index] == 1:
slider.disabled = True
else:
slider.disabled = False
if np.any(np.array(slider.value) >=
slider.max):
slider.value = (0, slider.max)
def _set_int_slider_options(slider, record_data_index):
"""Set options of a slider.
slider: The slider to set the options of,
record_data_index: Index position of the property to set.
"""
slider.max = self.data.rawData.shape[
record_data_index
] - 1
if slider.value > self.wSliderPPDelay.max:
slider.value = self.wSliderPPDelay.max
if slider.max == 1:
slider.disabled = True
else:
slider.disabled = False
# TODO Maybe I should split this up in conf_widget_options
# and conf_widget_values.
_set_range_slider_options(self.wRangeSliderPPDelay, PP_INDEX)
_set_range_slider_options(self.wRangeSliderFrame, FRAME_AXIS_INDEX)
_set_range_slider_options(self.wRangeSliderPixelY, Y_PIXEL_INDEX)
self.wIntTextPixelYStep.max = self.wRangeSliderPixelY.max
if self.wIntTextPixelYStep.value > self.wIntTextPixelYStep.max:
self.wIntTextPixelYStep.value = self.wIntTextPixelYStep.max
_set_range_slider_options(self.wRangeSliderTracePixelX, X_PIXEL_INDEX)
_set_int_slider_options(self.wSliderPPDelay, PP_INDEX)
_set_int_slider_options(self.wSliderFrame, FRAME_AXIS_INDEX)
_set_range_slider_options(self.wRangeZeroTime, PP_INDEX)
self.wTextCentralWl.value = self.data.central_wl
self.wTextVisWl.value = self.data.vis_wl
# Currently not used.
self.wSliderFrame.max = self.data.base.shape[
FRAME_AXIS_INDEX
] - 1
if self.wSliderFrame.max == 1:
self.wSliderFrame.disabled = True
else:
self.wSliderFrame.disabled = False
if self.wSliderFrame.value > self.wSliderFrame.max:
self.wSliderFrame.value = self.wSliderFrame.max
self.wIntTextPumped.max = self.data.rawData.shape[Y_PIXEL_INDEX] - 1
self.wIntTextUnpumped.max = self.wIntTextPumped.max
self.wIntTextUnpumped.value = self.data.unpumped_index
self.wIntTextPumped.value = self.data.pumped_index
if self.wIntTextPumped.value == self.wIntTextUnpumped.value:
self.wIntTextUnpumped.value += 1
self.wTextBaselineOffset.value = self.data.baseline_offset
def _init_figure_observers(self):
"""All observers that call the *update_figure_callback* """
# Because during widget runtime it can be necessary to stop
# and restart the automatic figure updating to prevent flickering
# and to speed up the gui. There is a special function to
# set up the observers and also to remove the observers in the
# figures_widgets list.
for widget in self._figure_widgets:
widget.observe(self._update_figure_callback, "value")
def _unobserve_figure(self):
"""Unobserver figure observers."""
for widget in self._figure_widgets:
try:
widget.unobserve(self._update_figure_callback, 'value')
except ValueError:
if debug:
print('Cant unobserve {} description is {}'.format(
widget, widget.description
))
def _init_observer(self):
"""Set all observer of all subwidgets."""
# This registers the callback functions to the gui elements.
# After a call of _init_observer, the gui elements start to
# actually do something, namely what ever is defined within the
# callback function of the observer.
self.wTextFolder.on_submit(self._on_folder_submit)
self.wSelectFile.observe(self._load_data, 'value')
self.wDropdownCalib.observe(self._on_calib_changed, "value")
self.wTextCentralWl.observe(self.x_spec_renew, "value")
self.wTextVisWl.observe(self.x_spec_renew, "value")
self.wIntTextPumped.observe(self._on_pumped_index_changed, "value")
self.wIntTextUnpumped.observe(self._on_unpumped_index_changed, "value")
self.wCheckDelayMedian.observe(self._on_delay_median_clicked, "value")
self.wDropdownDelayMode.observe(self._on_delay_mode_changed, "value")
self.wCheckFrameMedian.observe(self._on_frame_median_clicked, "value")
self.wDropdownFrameMode.observe(self._on_frame_mode_changed, "value")
self.wRangeSliderPixelX.observe(self._set_roi_x_pixel_spec, "value")
self.wRangeSliderTracePixelX.observe(self._set_roi_trace_x_pixel,
"value")
self.wRangeSliderFrame.observe(self._set_roi_frames,
"value")
self.wRangeSliderPixelY.observe(self._set_roi_spectra,
"value")
self.wRangeSliderPPDelay.observe(self._set_roi_delays,
"value")
self.wButtonSaveRecord.on_click(self._on_save_record)
self.wCheckShowZeroTimeSubtraction.observe(
self._set_zero_time_subtraction, "value"
)
self.wTextBaselineOffset.observe(
self._on_baseline_offset_changed, "value"
)
self.wRangeZeroTime.observe(self._set_zero_time_selec, "value")
self.wSnapXRoi.on_click(self._snap_x_roi)
#self._init_figure_observers()
    def _on_folder_submit(self, new=None):
        """Refresh the file-selection list after a new folder path is entered.

        Warns and returns early when the entered path is not a directory.

        new: unused; present to satisfy the ipywidgets callback signature.
        """
        if not os.path.isdir(self.wTextFolder.value):
            print('Warning folder {} not found'.format(self.wTextFolder.value))
            return
        if debug:
            print("_on_folder_submit_called")
        if debug > 1:
            print("fnames:", self.fnames)
        # The *with* is a workaround needed by the test functions, not the
        # gui itself; it suppresses trait notifications while the options
        # list is replaced. NOTE(review): the original author remarks it
        # "doesn't quite work" -- verify if notifications still fire.
        with self.wSelectFile.hold_trait_notifications():
            self.wSelectFile.options = self.fnames
def _load_data(self, new=None):
"""Update the internal data objects.
Loads data from hdd, and sets data properties according to gui settings.
Afterwards, the gui settings are configured agains the data again to
ensure consistency.
Sheme:
load data ---> update data ---> configure widget options and values
"""
if len(self.wSelectFile.value) == 0:
return
elif len(self.wSelectFile.value) == 1:
self.data = SfgRecord(
self.folder + "/" + self.wSelectFile.value[0]
)
else:
records = [SfgRecord(self.folder + "/" + fname)
for fname in self.wSelectFile.value]
self.data = concatenate_list_of_SfgRecords(records)
self._unobserve_figure()
self._set_zero_time_subtraction(None)
self._set_roi_trace_x_pixel()
self._set_roi_frames()
self._set_roi_spectra()
self._set_roi_delays()
self._set_pumped_index()
# Deactivating the observers here prevents flickering
# and unneeded calls of _update_figure. Thus we
# call it manually after a recall of _init_observer
self._conf_widget_with_data()
self._init_figure_observers()
self._update_figure()
#print("keep figures unobserved: ", keep_figure_unobserved)
def _set_roi_x_pixel_spec(self, new=None):
self.data.roi_x_pixel_spec = self.wRangeSliderPixelX.slice
def _set_roi_trace_x_pixel(self, new=None):
self._rois_x_pixel_buffer[0] = slice(*self.wRangeSliderTracePixelX.value)
self.data.rois_x_pixel_trace = self._rois_x_pixel_buffer
def _set_roi_frames(self, new=None):
self.data.roi_frames = slice(*self.wRangeSliderFrame.value)
def _set_roi_spectra(self, new=None):
self.data.roi_spectra = self.spec_slice
def _set_roi_delays(self, new=None):
self.data.roi_delay = self.wRangeSliderPPDelay.slice
def _set_pumped_index(self, new=None):
self.data.unpumped_index = self._unpumped_index_buffer
self.data.pumped_index = self._pumped_index_buffer
def x_spec_renew(self, new={}):
"""Renew calibration according to gui."""
self.data.central_wl = self.wTextCentralWl.value
self.data.vis_wl = self.wTextVisWl.value
def _on_delay_median_clicked(self, new=None):
if self.wCheckDelayMedian.value:
self.wDropdownDelayMode.value = "Region"
def _on_frame_median_clicked(self, new=None):
if self.wCheckFrameMedian.value:
self.wDropdownFrameMode.value = "Region"
self.wSliderFrame.disabled = True
else:
self.wSliderFrame.disabled = False
def _on_frame_mode_changed(self, new=None):
if self.wDropdownFrameMode.value == "Index":
self.wCheckFrameMedian.value = False
def _on_delay_mode_changed(self, new=None):
if self.wDropdownDelayMode.value == "Region":
self.wSliderPPDelay.disabled = True
else:
self.wSliderPPDelay.disabled = False
def _on_calib_changed(self, new=None):
"""Calibration changed."""
self.x_spec_renew()
self.wCheckAutoscale.value = True
def _on_pumped_index_changed(self, new=None):
"""Reset Bleach related properties."""
self._pumped_index_buffer = self.wIntTextPumped.value
self.data.pumped_index = self.wIntTextPumped.value
def _on_unpumped_index_changed(self, new=None):
self._unpumped_index_buffer = self.wIntTextUnpumped.value
self.data.unpumped_index = self.wIntTextUnpumped.value
def _set_zero_time_subtraction(self, new=None):
self.data.zero_time_subtraction = \
self.wCheckShowZeroTimeSubtraction.value
def _set_zero_time_selec(self, new=None):
self.data.zero_time_selec = self.wRangeZeroTime.slice
def _on_baseline_offset_changed(self, new=None):
self.data.baseline_offset = self.wTextBaselineOffset.value
def _on_save_record(self, new=None):
fname = self.folder + '/' + self.wTextSaveRecord.value
data = self.data.keep_frames()
data.save(fname)
    def _snap_x_roi(self, new=None):
        """Append the current trace x-region slider value as an extra trace ROI.

        new: unused; present to satisfy the ipywidgets button callback.
        """
        self.data.rois_x_pixel_trace.append(self.wRangeSliderTracePixelX.slice)
        # Keep a reference so the snapped regions survive switching data sets.
        self._rois_x_pixel_buffer = self.data.rois_x_pixel_trace
        self._update_figure()
@property
def folder(self):
return os.path.abspath(self.wTextFolder.value)
@property
def fnames(self):
return _filter_fnames(self.wTextFolder.value)
@property
def pp_delay_slice(self):
"""PP Delay index Slice"""
return self.wRangeSliderPPDelay.slice
@property
def pp_delay_selected(self):
if self.wDropdownDelayMode.value == "Index":
return _slider_int_to_slice(self.wSliderPPDelay)
return self.wRangeSliderPPDelay.slice
@property
def frame_selected(self):
"""Gui selected frame slice."""
if self.wDropdownFrameMode.value == "Index":
return _slider_int_to_slice(self.wSliderFrame)
return self.wRangeSliderFrame.slice
@property
def spec_slice(self):
"""Specta slice/Y-Pixel slice."""
sl = self.wRangeSliderPixelY.slice
ret = slice(sl.start, sl.stop, self.wIntTextPixelYStep.value)
return ret
@property
def x_pixel_slice(self):
return self.wRangeSliderPixelX.slice
@property
def x_trace_pixel_slice(self):
"""X Pixel slice."""
return self.wRangeSliderTracePixelX.slice
@property
def x_spec(self):
"""X data of the *Signal* plot. """
if self.wDropdownCalib.value == 'pixel':
x = self.data.pixel
elif self.wDropdownCalib.value == 'wavelength':
x = self.data.wavelength
elif self.wDropdownCalib.value == 'wavenumber':
x = self.data.wavenumber
return x
    def spectra(self, prop, kwargs_prop={}):
        """Use settings of the gui to select spectra data from SfgRecord.

        prop: name of the SfgRecord property to select
            (e.g. 'rawData', 'basesubed', 'normalized', 'bleach').
        kwargs_prop: extra keyword arguments forwarded to that property
            (e.g. the bleach 'opt' and 'prop' options).

        Returns the result of ``SfgRecord.select`` for the current
        delay/frame/spectra/pixel selection and smoothing of the gui.
        """
        kwargs = dict(
            prop=prop,
            kwargs_prop=kwargs_prop,
            roi_delay=self.pp_delay_selected,
            roi_frames=self.frame_selected,
            roi_spectra=self.spec_slice,
            roi_pixel=self.x_pixel_slice,
            frame_med=self.wCheckFrameMedian.value,
            delay_mean=self.wCheckDelayMedian.value,
            spectra_mean=self.wCheckSpectraMean.value,
            medfilt_pixel=self.wIntSliderSmooth.value,
        )
        return self.data.select(**kwargs)
def trace(self, prop, kwargs_prop={}):
"""Use settings of gui to susbelect data for trace."""
kwargs = dict(
prop=prop,
kwargs_prop=kwargs_prop,
roi_delay=self.pp_delay_slice,
roi_frames=self.frame_selected,
roi_spectra=self.spec_slice,
roi_pixel=self.x_trace_pixel_slice,
frame_med=self.wCheckFrameMedian.value,
spectra_mean=self.wCheckSpectraMean.value,
medfilt_pixel=self.wIntSliderSmooth.value,
)
return self.data.trace(**kwargs)
def select_traces(self, y_property):
"""Use settings of gui to susbelect data for traces."""
kwargs = dict(
y_property=y_property,
x_property='pp_delays',
roi_delay=self.wRangeSliderPPDelay.slice,
roi_frames=self.frame_selected,
roi_spectra=self.spec_slice,
frame_med=self.wCheckFrameMedian.value,
spectra_mean=self.wCheckSpectraMean.value,
pixel_mean=True,
medfilt_pixel=self.wIntSliderSmooth.value,
)
ret_shape = list(self.data.subselect(**kwargs)[1].shape)
ret_shape[3] = len(self.data.rois_x_pixel_trace)
ret = np.zeros(ret_shape)
for i in range(len(self.data.rois_x_pixel_trace)):
roi_x_pixel = self.data.rois_x_pixel_trace[i]
x, y = self.data.subselect(roi_pixel=roi_x_pixel, **kwargs)
ret[:, :, :, i] = y[:, :, :, 0]
return x, ret
@property
def x_vlines(self):
ret = [self.x_spec[self.x_trace_pixel_slice.start],
self.x_spec[self.x_trace_pixel_slice.stop - 1]]
return ret
class WidgetPlots():
    """Plotly Base plotting backend.

    Experimental alternative to the matplotlib backend: holds a plotly
    figure, the list of data traces for it and its layout. The update
    and init hooks are stubs to be filled in by subclasses.
    """
    def __init__(self):
        # Imported lazily so plotly is only required if this backend is used.
        import plotly.graph_objs as go
        # Plotly figure obj
        self.figure = go.Figure()
        # List of plotly data object to plot on the figure
        self.data = []
        # Plotly layout obj for the figure.
        self.layout = go.Layout()
    def _update_figure(self):
        # Stub: subclasses redraw the plotly figure here.
        pass
    def _init_figure(self):
        # Stub: subclasses build the initial plotly figure here.
        pass
class WidgetFigures():
"""Collect figure init and update functions within this class"""
axes_grid = np.array([[]]) # a 2d array with the figure axes
@property
def fig(self):
return self._fig
@property
def axes(self):
return self._fig.axes
def redraw_figure(self):
"""This forces matplotlib to update the figure canvas."""
self._fig.canvas.draw()
for ax in self.axes:
ax.figure.canvas.draw()
def _update_figure(self):
# OVERWRITE THIS FUNCTION
pass
def _update_figure_callback(self, new):
"""A callback version of _update_figure for usage in observers."""
self._update_figure()
def init_single_figure(self):
"""Init the fiures and axes"""
try:
conds = (
Counter(self.axes) != Counter(
self.axes_grid.flatten().tolist()
),
len(self.axes) is 0
)
if not self._fig:
self._fig, self.axes_grid = plt.subplots(
1, 1, figsize=self._figsize, squeeze=False
)
# This allows for redrawing the axis on an already existing figure.
elif any(conds):
self._fig.set_size_inches(self._figsize, forward=True)
self.axes_grid = np.array([[self._fig.add_subplot(111)]])
except TypeError:
pass
def init_two_figures(self):
"""Init the two axis figure."""
try:
conds = (
Counter(self.axes) != Counter(
self.axes_grid.flatten().tolist()
),
len(self.axes) is 0
)
if not self._fig:
self._fig, self.axes_grid = plt.subplots(
1, 2, figsize=self._figsize, squeeze=False
)
# This allows for redrawing the axis on an already existing figure.
elif any(conds):
self._fig.set_size_inches(self._figsize, forward=True)
self.axes_grid = np.array([[
self._fig.add_subplot(121),
self._fig.add_subplot(122)
]])
except TypeError:
pass
    def _plot_spec(self, xdata, ydata, ax, label_base=""):
        """Plot the basic 4d data types of the data record.

        Iterates delays, frames and spectra of the 4d selection and draws
        one line per spectrum. Only the very first line keeps the verbose
        *label_base* prefix; subsequent lines get an index-only label so
        the legend is not flooded with repeated prefixes.

        xdata: The x_axis of the plot.
        ydata: 4d array (delay, frame, spectrum, pixel).
        ax: matplotlib axis.
        label_base: label prefix for the first plotted line.
        """
        initial = True
        for delay_index in range(len(ydata)):
            delay = ydata[delay_index]
            for frame_index in range(len(delay)):
                frame = delay[frame_index]
                for spectrum_index in range(len(frame)):
                    spectrum = frame[spectrum_index]
                    if initial:
                        initial = False
                    else:
                        label_base = ''
                    # _append_identifier adds a '{}/{} ...' template that is
                    # filled with the absolute delay/frame/spectrum indices.
                    label_str = self._append_identifier(label_base).format(
                        self.pp_delay_selected.start + delay_index,
                        self.pp_delay_selected.stop,
                        self.frame_selected.start + frame_index,
                        self.frame_selected.stop,
                        self.spec_slice.start + spectrum_index,
                        self.spec_slice.stop
                    )
                    ax.plot(xdata, spectrum, label=label_str)
    def _plot_traces(self, xdata, ydata, ax, label_base=''):
        """Plot pp-delay traces of a 4d selection.

        xdata: x axis values (pp delays).
        ydata: 4d array; transposed so the pixel-ROI axis is iterated first.
        ax: matplotlib axis to draw on.
        label_base: prefix for the legend labels.
        """
        # NOTE(review): `initial` is never used here (copy of _plot_spec?).
        initial = True
        y = ydata.T
        for pixel in y:
            for spec in pixel:
                for frame in spec:
                    # NOTE(review): the label always uses rois_x_pixel_trace[0];
                    # with several snapped ROIs each trace should presumably be
                    # labelled with its own region -- confirm intended behavior.
                    label_str = label_base + '{:.0f}-{:.0f}'.format(
                        int(self.data.rois_x_pixel_trace[0].start),
                        int(self.data.rois_x_pixel_trace[0].stop)
                    )
                    ax.plot(xdata, frame.T, '-o', label=label_str)
def _plot_rawData(self, ax):
if not self.wCheckShowRawData.value:
return
self._plot_spec(self.x_spec[self.x_pixel_slice],
self.spectra('rawData'), ax, 'RawData\n')
def _plot_basesubed(self, ax):
if not self.wCheckShowBasesubed.value:
return
self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('basesubed'),
ax, 'Basesubed\n')
def _plot_normalized(self, ax):
if not self.wCheckShowNormalized.value:
return
self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('normalized'), ax, 'RawData\n')
def _plot_base(self, ax):
    """Draw the baseline spectra if the corresponding box is checked."""
    if not self.wCheckShowBase.value:
        return
    # Bugfix: the legend prefix was 'RawData\n' (copy-paste slip).
    self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('base'), ax, 'Base\n')
def _plot_norm(self, ax):
    """Draw the normalization spectra if the corresponding box is checked."""
    if not self.wCheckShowNorm.value:
        return
    # Bugfix: the legend prefix was 'RawData\n' (copy-paste slip).
    self._plot_spec(self.x_spec[self.x_pixel_slice], self.spectra('norm'), ax, 'Norm\n')
def _plot_bleach(self, ax):
    """Draw the bleach spectra if the corresponding box is checked.

    The bleach computation is parametrized by the two bleach dropdowns
    (operation and property), forwarded as keyword arguments.
    """
    if not self.wCheckShowBleach.value:
        return
    self._plot_spec(
        self.x_spec[self.x_pixel_slice],
        self.spectra(
            'bleach',
            kwargs_prop=dict(
                opt=self.wDropdownBleachOpt.value,
                prop=self.wDropdownBleachProp.value
            )
        ),
        ax,
        'Bleach\n',
    )
def _plot_traces_rawData(self, ax):
    """Draw the raw-data trace if its checkbox is set."""
    if not self.wCheckShowTracesRawData.value:
        return
    xdata, ydata, yerr = self.trace('rawData')  # yerr is currently unused
    self._plot_traces(xdata, ydata, ax, 'Raw\n')
def _plot_traces_basesubed(self, ax):
    """Draw the baseline-subtracted trace if its checkbox is set."""
    if not self.wCheckShowTracesBasesubed.value:
        return
    xdata, ydata, yerr = self.trace('basesubed')  # yerr is currently unused
    self._plot_traces(xdata, ydata, ax, 'Basesubed\n')
def _plot_traces_normalized(self, ax):
    """Draw the normalized trace if its checkbox is set."""
    if not self.wCheckShowTracesNormalized.value:
        return
    xdata, ydata, yerr = self.trace('normalized')  # yerr is currently unused
    self._plot_traces(xdata, ydata, ax, 'Normalized\n')
def _plot_traces_bleach(self, ax):
    """Draw the bleach trace if its checkbox is set.

    The bleach computation is parametrized by the two bleach dropdowns.
    """
    if not self.wCheckShowTracesBleach.value:
        return
    xdata, ydata, yerr = self.trace(
        'bleach',
        kwargs_prop=dict(
            opt=self.wDropdownBleachOpt.value,
            prop=self.wDropdownBleachProp.value
        ),
    )  # yerr is currently unused
    self._plot_traces(xdata, ydata, ax, 'Bleach\n')
def plot_spec(self, ax):
    """Render all enabled spectrum layers onto *ax* and set labels.

    Each _plot_* helper is a no-op when its checkbox is unchecked.
    """
    self._plot_rawData(ax)
    self._plot_basesubed(ax)
    self._plot_normalized(ax)
    self._plot_base(ax)
    self._plot_norm(ax)
    self._plot_bleach(ax)
    ax.set_title(self._x_spec_title)
    ax.set_xlabel(self.x_spec_label)
def plot_traces(self, ax):
    """Render all enabled trace layers onto *ax* and set labels."""
    self._plot_traces_bleach(ax)
    self._plot_traces_rawData(ax)
    self._plot_traces_basesubed(ax)
    self._plot_traces_normalized(ax)
    ax.set_xlabel('pp delay / fs')
    ax.set_title('Trace')
    ax.legend()
def _on_ax0_lim_changed(self, new=None):
    """Callback for the *Signal* axis."""
    # Called when the xlim of the `Signal` plot is changed; remember the
    # limits so they can be restored when autoscale is off.
    self._autoscale_buffer = _lims2buffer(self.axes[0])
def _on_ax1_lim_changed(self, new=None):
    """Callback for the trace axis: remember its limits."""
    # Called when the ylim of the `Signal` plot is changed
    self._autoscale_buffer_2 = _lims2buffer(self.axes[1])
@property
def x_spec_label(self):
    """X axis label of the spec plot, chosen from the calibration mode."""
    calib_labels = {
        'wavenumber': r"Wavenumber/cm$^{-1}$",
        'wavelength': "Wavelength/nm",
    }
    return calib_labels.get(self.wDropdownCalib.value, "Pixel")
@property
def _x_spec_title(self):
    """Title of the spec plot: the selected delay (or delay range) in fs."""
    if self.wDropdownDelayMode.value == 'Index':
        return "Delay {} fs".format(
            self.data.pp_delays[self.pp_delay_selected.start]
        )
    else:
        # Ranged mode: show first and last delay of the ROI.
        return "Delay {} - {} fs".format(
            self.data.pp_delays[self.data.roi_delay][0],
            self.data.pp_delays[self.data.roi_delay][-1]
        )
def _append_identifier(self, label_base):
"""Append identifier to label string for plots."""
if self.wCheckDelayMedian.value:
label_base += 'D[{0}:{1}]_'
else:
label_base += 'D[{0}]_'
if self.wCheckFrameMedian.value:
label_base += 'F[{2}:{3}]_'
else:
label_base += 'F[{2}]_'
if self.wCheckSpectraMean.value:
label_base += 'S[{4}:{5}]'
else:
label_base += 'S[{4}]'
return label_base
class BaselineTab(WidgetBase, WidgetFigures):
    """Dashboard tab for recording and inspecting a baseline spectrum."""

    def __init__(self, figsize=(8, 6), **kwargs):
        super().__init__(figsize=figsize, **kwargs)

    def _init_widget(self):
        """Init the widgets that are to be shown."""
        import ipywidgets as wi
        super()._init_widget()
        # Trace-related controls make no sense for a baseline; hide them.
        self.wRangeSliderTracePixelX.layout.visibility = 'hidden'
        self.wCheckAutoscaleTrace.layout.visibility = 'hidden'
        self.wRangeSliderPixelX.layout.visibility = 'hidden'
        self.wCheckShowBase.value = False
        self.children = wi.VBox([
            self._data_box,
            self._signal_box,
            self._calib_box,
            self._save_record_box
        ])

    def _init_figure(self):
        """Init the figures and axes (single-axes layout)."""
        self.init_single_figure()

    def _update_figure(self):
        """Is called on all gui element changes.

        This function renders the plot. Whenever you want to make changes
        visible in the figure you need to call this."""
        self._init_figure()
        ax = self.axes[0]
        ax.clear()
        self.plot_spec(ax)
        ax.legend(framealpha=0.5)
        ax.set_xlabel(self.wDropdownCalib.value)
        ax.set_title('Baseline')
        # Keep the autoscale buffer in sync with manual zoom/pan.
        ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
        ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
        if self.wCheckAutoscale.value:
            self._autoscale_buffer_2 = _lims2buffer(ax)
        else:
            _buffer2lims(ax, self._autoscale_buffer_2)
        self.redraw_figure()

    @property
    def to_base(self):
        """Y data to be sent on Set Baseline button press."""
        return self.spectra('rawData')
class IRTab(WidgetBase, WidgetFigures):
    """Widget to visualize IRTab type data.

    IRTab type data has a SfgRecord and a BaselineTab.
    """

    def __init__(self, figsize=(8, 6), **kwargs):
        super().__init__(figsize=figsize, **kwargs)

    def _init_widget(self):
        """Init the widgets that are to be shown."""
        import ipywidgets as wi
        super()._init_widget()
        # This allows the data to be used for normalization from start on
        # (avoids divide-by-zero on all-zero default data).
        self.data.rawData += 1
        # Trace-related controls make no sense here; hide them.
        self.wRangeSliderTracePixelX.layout.visibility = 'hidden'
        self.wCheckAutoscaleTrace.layout.visibility = 'hidden'
        self.wRangeSliderPixelX.layout.visibility = 'hidden'
        self.wCheckShowBase.value = False
        show_box = wi.HBox([
            self.wCheckShowRawData,
            self.wCheckShowBasesubed,
            self.wCheckShowBase,
        ])
        self.children = wi.VBox([
            self._data_box,
            self._signal_box,
            self.wTextBaselineOffset,
            show_box,
            self._calib_box,
            self._save_record_box
        ])

    def _init_figure(self):
        """Init the figures and axes (single-axes layout)."""
        self.init_single_figure()

    def _update_figure(self):
        """Is called on all gui element changes.

        This function renders the plot. Whenever you want to make changes
        visible in the figure you need to call this."""
        self._init_figure()
        ax = self.axes[0]
        ax.clear()
        self.plot_spec(ax)
        ax.legend(framealpha=0.5)
        ax.set_xlabel(self.wDropdownCalib.value)
        ax.set_title('Spectrum')
        ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
        ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
        if self.wCheckAutoscale.value:
            self._autoscale_buffer_2 = _lims2buffer(ax)
        else:
            _buffer2lims(ax, self._autoscale_buffer_2)
        self.redraw_figure()

    def _init_observer(self):
        # No tab-specific observers yet; keep the hook for symmetry.
        super()._init_observer()

    @property
    def to_norm(self):
        """The property that gets exported to the Record tab if one clicks
        Send IR."""
        return self.spectra('basesubed')
class RecordTab(WidgetBase, WidgetFigures):
    def __init__(self, central_wl=674, vis_wl=812, figsize=(10, 4), **kwargs):
        """Plotting gui based on the SfgRecord class as a data backend.

        Parameters
        ----------
        data : Optional, SfgRecord obj.
            Default dataset to start with. If None, an empty one is created.
        fig: Optional, matplotlib figure
            The figure to draw on.
            Defaults to create a new one.
        ax: Optional, matplotlib axes. The axes to draw on.
            Defaults to create a new one.
        central_wl: float
            Central wavelength of the camera to start with.
            If none is given, it tries to find out by investigating the
            metadata.
        vis_wl: float
            Wavelength of the visible to begin with.

        Example:
        -------
        test = RecordTab()
        test()
        # Type the folder you want to investigate in the folder text box and
        # press RETURN.
        # A list of selectable files will appear on the right side.
        """
        super().__init__(central_wl=central_wl, vis_wl=vis_wl, figsize=figsize,
                         **kwargs)
        # Cached axis limits; currently unused placeholders -- TODO confirm.
        self._ax_xlim = None
        self._ax_ylim = None

    def _init_figure(self):
        """Init the two axis figure."""
        self.init_two_figures()

    # TODO Axes is too small on summed
    def _init_widget(self):
        """Init all widgets that are to be drawn."""
        import ipywidgets as wi
        super()._init_widget()
        # self.children is the widget we are rendering up on call.
        show_box = wi.VBox([
            wi.HBox([
                self.wSnapXRoi,
                self.wTextBaselineOffset,
            ]),
            wi.HBox([
                self.wCheckShowRawData,
                self.wCheckShowBasesubed,
                self.wCheckShowBase,
                self.wCheckShowNorm,
                self.wCheckShowNormalized,
            ]),
            wi.VBox([
                wi.Label("Bleach:"),
                wi.HBox([
                    self.wCheckShowBleach,
                    self.wDropdownBleachOpt,
                    self.wDropdownBleachProp,
                ]),
            ]),
            wi.VBox([
                wi.Label("Traces:"),
                wi.HBox([
                    self.wCheckShowTracesRawData,
                    self.wCheckShowTracesBasesubed,
                    self.wCheckShowTracesNormalized,
                    self.wCheckShowTracesBleach,
                ]),
            ]),
        ])
        bleach_box = wi.HBox([
            self.wIntTextPumped,
            self.wIntTextUnpumped,
            self.wCheckShowZeroTimeSubtraction,
            self.wRangeZeroTime,
        ])
        self.children = wi.VBox([
            self._data_box,
            self._signal_box,
            show_box,
            bleach_box,
            self._calib_box,
            self._save_record_box
        ])

    def _update_figure(self):
        """Update the figure of the gui."""
        self._init_figure()
        fontsize = 8
        # Left axes: the spectra.
        ax = self.axes[0]
        ax.clear()
        self.plot_spec(ax)
        ax.set_xticklabels(ax.get_xticks(), fontsize=fontsize)
        ax.set_yticklabels(ax.get_yticks(), fontsize=fontsize)
        ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
        ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
        ax.callbacks.connect('xlim_changed', self._on_ax0_lim_changed)
        ax.callbacks.connect('ylim_changed', self._on_ax0_lim_changed)
        if self.wCheckAutoscale.value:
            self._autoscale_buffer = _lims2buffer(ax)
        else:
            _buffer2lims(ax, self._autoscale_buffer)
        # Mark the trace ROI boundaries on the spectrum plot.
        ax.vlines(self.x_vlines, *ax.get_ylim(),
                  linestyle="dashed")
        ax.legend(framealpha=0.5)
        # Right axes: the pump-probe traces.
        ax = self.axes[1]
        ax.clear()
        self.plot_traces(ax)
        ax.set_xticklabels(ax.get_xticks(), fontsize=fontsize)
        ax.set_yticklabels(ax.get_yticks(), fontsize=fontsize)
        ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.3g'))
        ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
        ax.yaxis.tick_right()
        ax.callbacks.connect('xlim_changed', self._on_ax1_lim_changed)
        ax.callbacks.connect('ylim_changed', self._on_ax1_lim_changed)
        if self.wCheckAutoscaleTrace.value:
            self._autoscale_buffer_2 = _lims2buffer(ax)
        else:
            _buffer2lims(ax, self._autoscale_buffer_2)
        self.redraw_figure()
# This is broken
class ImgView(WidgetBase):
    """A class to view full spe camera images.

    NOTE(review): upstream marks this class as broken; the widget wiring
    is incomplete and `_update_figure` mixes index- and slice-based
    access to the data.
    """

    def __init__(self, *args, figsize=(8, 6), **kwargs):
        super().__init__(*args, figsize=figsize, **kwargs)
        self.axes_grid = np.array([[]])

    def _init_figure(self):
        """Create (or re-create) the 3-axes image view layout."""
        if not self._fig:
            # Bugfix: plt.figure's first positional parameter is `num`,
            # not figsize -- it must be passed by keyword.
            self._fig = plt.figure(figsize=self._figsize)
            gs = gridspec.GridSpec(2, 2, width_ratios=[1, 3],
                                   height_ratios=[3, 1])
            ax = self._fig.add_subplot(gs[0, 1])
            self._fig.add_subplot(gs[0, 0], sharey=ax)
            self._fig.add_subplot(gs[1, 1], sharex=ax)
        elif self._fig and len(self.axes) != 3:
            # Bugfix: was `is not 3`, which compares identity, not value.
            self._fig.set_size_inches(self._figsize, forward=True)
            gs = gridspec.GridSpec(2, 2, width_ratios=[1, 3],
                                   height_ratios=[3, 1])
            ax = self._fig.add_subplot(gs[0, 1])
            self._fig.add_subplot(gs[0, 0], sharey=ax)
            self._fig.add_subplot(gs[1, 1], sharex=ax)

    def _update_figure(self):
        """Redraw the 2d image and the summed y-profile."""
        self._init_figure()
        view_data = self.data.rawData[
            self.pp_delay_index, self.frame_index
        ]
        ax = self.axes[0]
        plt.sca(ax)
        ax.clear()
        img = ax.imshow(
            view_data,
            interpolation=self.w_interpolate.value,
            origin="lower",
            aspect="auto"
        )
        plt.colorbar(img)
        # Profile plot: sum the selected y-pixel range of the first
        # selected delay/frame.
        axl = self.axes[1]
        axl.clear()
        y_slice = self.wRangeSliderPixelY.slice
        view_data2 = self.data.rawData[
            self.pp_delay_selected.start, self.wRangeSliderFrame.value[0], y_slice
        ].sum(Y_PIXEL_INDEX)
        axl.plot(view_data2)

    def _init_widget(self):
        """Build the widget tree; hide controls that make no sense here."""
        import ipywidgets as wi
        super()._init_widget()
        self.wIntSliderSmooth.visible = False
        self.wIntSliderSmooth.disabled = True
        self.wRangeSliderPPDelay.visible = False
        self.wRangeSliderPPDelay.disabled = True
        self.w_interpolate = wi.Dropdown(
            description="Interpolation",
            options=('none', 'nearest', 'bilinear', 'bicubic',
                     'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
                     'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel',
                     'mitchell', 'sinc', 'lanczos'),
            value="nearest",
        )
        self.children = wi.VBox([
            self.wVBoxSignal,
            wi.HBox(
                [self.wDropdownCalib, self.wTextCentralWl, self.wTextVisWl]
            ),
            self.w_interpolate,
        ])

    def _init_observer(self):
        super()._init_observer()
        # Redraw when the interpolation mode changes.
        self.w_interpolate.observe(self._update_figure_callback, "value")
class Dashboard():
    """Placeholder container that bundles a set of widgets.

    NOTE(review): stub implementation -- __call__ intentionally does
    nothing yet.
    """

    def __init__(self, *args, **kwargs):
        # kwargs are accepted but currently ignored.
        self.widgets = args
        self.fig = None
        self.ax = None

    def __call__(self):
        pass
class PumpProbe():
    """Tabbed dashboard combining record, IR and baseline views.

    The first page shows two axes.  On the first axes one sees the raw
    signal and possibly a baseline.  Each y-pixel of the ccd camera gets
    projected into a single spectra line on this first axes.  With the
    *Show Baseline* button one can toggle the visibility of the baseline.
    Autoscale prevents the axes from re-scaling upon data change.
    Numerous sliders allow for inspection of the data.

    The second axes shows the trace of each spectrum vs pump-probe time
    delay.  This is only useful for pump-probe experiments.  Otherwise
    this axis will only show a single point with the value of the
    sum (area) of the spectrum from axes one.

    The second page shows a single spectrum and possibly a baseline.

    The third page shows, after usage of the normalize button, the
    quotient of the first and the second page spectrum.  This allows for
    IRTab normalization."""

    def __init__(self):
        import ipywidgets as wi
        self.tabed_widgets = (
            RecordTab(),
            IRTab(),
            BaselineTab(),
        )
        # Named aliases for the tabs above.
        self.tab_record = self.tabed_widgets[0]
        self.tab_ir = self.tabed_widgets[1]
        self.tab_record_baseline = self.tabed_widgets[2]
        children = []
        # All tabs share one figure; each tab redraws it when selected.
        self.wi_fig = plt.figure()
        # Names given explicitly to preserve order of tabs.
        for tabed_widget in self.tabed_widgets:
            tabed_widget._conf_widget_with_data()
            children.append(tabed_widget.children)
            tabed_widget._fig = self.wi_fig
        self.w_tabs = wi.Tab(children=children)
        names = ("Pump-Probe", "IR", "Baseline")
        for i in range(len(names)):
            self.w_tabs.set_title(i, names[i])
        self.children = self.w_tabs
        # Global action buttons shown below the tab bar.
        self.wButtonSetBaseline = wi.Button(description='Set Baseline')
        self.wButtonSetIrBaseline = wi.Button(description='Set Ir Baseline')
        self.wButtonNormalize = wi.Button(description='Set Normalize')
        self.wButtonSaveGui = wi.Button(description='Save Gui')
        self.wButtonLoadGui = wi.Button(description='Load Gui')
        self.children = wi.VBox([
            self.w_tabs,
            wi.HBox([
                self.wButtonSetBaseline,
                self.wButtonSetIrBaseline,
                self.wButtonNormalize,
                self.wButtonSaveGui,
                self.wButtonLoadGui,
            ])
        ])

    def __call__(self):
        """Wire up all observers and display the dashboard."""
        from IPython.display import display
        for tabed_widget in self.tabed_widgets:
            tabed_widget._init_observer()
        self._init_observer()
        # Render record tab as default.
        self.tab_record._update_figure()
        display(self.children)

    def _init_observer(self):
        """Initialize widgets of the GUI.

        Observers within this function interact between tabs, or are
        independent of tabs.
        """
        if debug:
            print("Dasboards._init_observer called")

        def test_widgets(tab):
            """List of widgets of a tab that change the data such that
            the test must be re-run before Ir or Baseline can be set."""
            return (
                tab.wSliderPPDelay,
                tab.wRangeSliderPPDelay,
                tab.wCheckDelayMedian,
                tab.wDropdownDelayMode,
                tab.wSliderFrame,
                tab.wRangeSliderFrame,
                tab.wCheckFrameMedian,
                tab.wDropdownFrameMode,
                tab.wIntSliderSmooth,
                tab.wRangeSliderPixelY,
                tab.wIntTextPixelYStep,
                tab.wSelectFile,
            )
        self.w_tabs.observe(self._on_tab_changed, 'selected_index')
        self.wButtonSetBaseline.on_click(self._on_setBaseline_clicked)
        self.wButtonSetIrBaseline.on_click(self._on_setIRBaseline_clicked)
        self.wButtonNormalize.on_click(self._on_set_normalize)
        # Any data-shaping change in the IR tab invalidates normalization.
        for widget in test_widgets(self.tab_ir):
            widget.observe(
                self._test_normalizability,
                "value"
            )
        # Any change in the baseline tab invalidates both baselines.
        for widget in test_widgets(self.tab_record_baseline):
            widget.observe(self._test_Record_baseline, "value")
            widget.observe(self._test_IR_Baseline, "value")
        self.tab_record_baseline.wSelectFile.observe(
            self._test_Record_baseline,
            "value"
        )
        self.tab_record_baseline.wSelectFile.observe(
            self._test_normalizability,
            "value"
        )
        self.wButtonSaveGui.on_click(self._on_save_gui_clicked)
        self.wButtonLoadGui.on_click(self._on_load_gui_clicked)

    def _on_tab_changed(self, new):
        """Clear the shared figure and let the new tab redraw it."""
        if debug:
            print("Dashboard._on_tab_changed called")
        axes = self.wi_fig.axes
        for ax in axes:
            self.wi_fig.delaxes(ax)
        page = self.w_tabs.selected_index
        widget = self.tabed_widgets[page]
        widget._update_figure()

    def _on_setBaseline_clicked(self, new):
        """Called when set baseline is clicked."""
        if not self._test_baseline_on_tab(
                self.tab_record, self.wButtonSetBaseline
        ):
            return
        self.tab_record.data.base = self.tab_record_baseline.to_base
        self.wButtonSetBaseline.style.button_color = "green"
        self.tabed_widgets[self.w_tabs.selected_index]._update_figure()

    def _on_setIRBaseline_clicked(self, new):
        """Called when set ir baseline is clicked."""
        if not self._test_baseline_on_tab(
                self.tab_ir, self.wButtonSetIrBaseline
        ):
            return
        self.tab_ir.data.base = self.tab_record_baseline.to_base
        self.wButtonSetIrBaseline.style.button_color = "green"
        self.tabed_widgets[self.w_tabs.selected_index]._update_figure()

    def _on_set_normalize(self, new):
        """Copy the IR tab's spectrum into the record tab's norm."""
        if debug:
            print("Normalize._on_set_normalize called.")
        if not self._test_normalizability():
            return
        self.tab_record.data.norm = self.tab_ir.to_norm
        # Update current plot
        self.wButtonNormalize.style.button_color = "green"
        self.tabed_widgets[self.w_tabs.selected_index]._update_figure()

    def _on_save_gui_clicked(self, new):
        """Save gui status to a json text file.

        Each tab of the dashboard gets a separate list entry. Each widget
        value is saved as a dictionary of widget names and values."""
        save_file = self.tab_record.folder + '/.last_state.json'
        with open(save_file, 'w') as outfile:
            save_list = []
            for i in range(len(self.tabed_widgets)):
                w = self.tabed_widgets[i]
                save_dict = {}
                for name, saveable_widget in w._save_widgets.items():
                    save_dict[name] = saveable_widget.value
                save_list.append(save_dict)
            dump(save_list, outfile, indent=4,
                 separators=(',', ': '), sort_keys=True)

    def _on_load_gui_clicked(self, new):
        """Restore gui status from the json file written by save."""
        def _pop_and_set(name):
            # Remove *name* from the saved dict and apply it immediately.
            value = saved_values.pop(name)
            w._save_widgets[name].value = value

        def _read_and_set(name):
            # Apply *name* if both a saved value and a widget exist.
            value = saved_values.get(name)
            widget = w._save_widgets.get(name)
            if isinstance(value, type(None)):
                return
            if isinstance(widget, type(None)):
                return
            widget.value = value
        try:
            infile = open(self.tab_record.folder + '/.last_state.json', 'r')
        except FileNotFoundError:
            pass
        else:
            with infile:
                imp = load(infile)
            # Loop over tabs
            for i in range(len(self.tabed_widgets)):
                saved_values = imp[i]
                w = self.tabed_widgets[i]
                # read folder file and baseline as the first
                _pop_and_set('folder')
                w._on_folder_submit(None)
                _pop_and_set('file')
                w._load_data()
                # Suppress per-change redraws while bulk-restoring values.
                w._unobserve_figure()
                for name in saved_values.keys():
                    try:
                        _read_and_set(name)
                    except TraitError:
                        msg = "Can't load {} with value {}".format(
                            name,
                            saved_values[name]
                        )
                        print(msg)
                        break
                w._init_figure_observers()
            self._on_tab_changed(None)

    def _test_normalizability(self, new=None):
        """Test if the data of w1 can be used to normalize the data of w0."""
        try:
            # Broadcasting raises ValueError when shapes are incompatible.
            norm = np.ones_like(self.tab_record.data.rawData) *\
                self.tab_ir.to_norm
            self.wButtonNormalize.style.button_color = 'orange'
            if np.all(self.tab_record.data.norm == norm):
                self.wButtonNormalize.style.button_color = 'green'
        except ValueError:
            self.wButtonNormalize.style.button_color = 'red'
            return False
        return True

    def _test_baseline_on_tab(self, tab, button):
        """Test if baseline data of tab can be set.

        tab: tab to set the baseline data of.
        button: button that was clicked and that should be colored
            accordingly (red=incompatible, orange=settable, green=set).
        """
        try:
            base = np.ones_like(tab.data.rawData) *\
                self.tab_record_baseline.to_base
            button.style.button_color = 'orange'
            # Must use _base here because .base has offset correction.
            if isinstance(tab.data._base, type(None)):
                return True
            if np.all(tab.data._base == base):
                button.style.button_color = 'green'
        except ValueError:
            button.style.button_color = 'red'
            return False
        return True

    def _test_IR_Baseline(self, new=None):
        return self._test_baseline_on_tab(
            self.tab_ir,
            self.wButtonSetIrBaseline
        )

    def _test_Record_baseline(self, new=None):
        return self._test_baseline_on_tab(
            self.tab_record,
            self.wButtonSetBaseline
        )
# #### Helper function
def _filter_fnames(folder_path):
"""Return list of known files in a folder."""
fnames = np.sort(glob(os.path.normcase(folder_path + '/*')))
# Only .dat, .spe and .npz are known
mask = [
any(conds) for conds in zip(
[".dat" in s for s in fnames],
[".spe" in s for s in fnames],
[".npz" in s for s in fnames],
)
]
fnames = fnames[np.where(mask)]
# Remove AVG
fnames = fnames[np.where(["AVG" not in s for s in fnames])]
fnames = [os.path.split(elm)[1] for elm in fnames]
return fnames
def _slider_range_to_slice(range_value_tuple, max):
"""Transform a tuple into a slice accounting for overlapping"""
if range_value_tuple[0] != range_value_tuple[1]:
return slice(*range_value_tuple)
if range_value_tuple[1] != max:
return slice(range_value_tuple[0], range_value_tuple[1]+1)
return slice(range_value_tuple[0]-1, range_value_tuple[1])
def _slider_int_to_slice(slider):
return slice(slider.value, slider.value+1)
def to_slice(attribute):
    """Decorator factory turning a method into a slice accessor.

    The decorated method's body is ignored; calling it returns
    ``slice(*widget.value)`` where *widget* is ``self.<attribute>``.
    Currently unused -- kept because explicitly calling the
    rangeSlider-to-slice helpers on the sliders reads clearer.
    """
    def _to_slice(f):
        def wrapper(self, *args):
            return slice(*getattr(self, attribute).value)
        return wrapper
    return _to_slice
def _lims2buffer(ax):
"""Set buffer values according to axis"""
buffer = [None, None]
buffer[0] = list(ax.get_xlim())
buffer[1] = list(ax.get_ylim())
return buffer
def _buffer2lims(ax, buffer):
if not isinstance(buffer[0], type(None)):
ax.set_xlim(*buffer[0])
if not isinstance(buffer[1], type(None)):
ax.set_ylim(*buffer[1])
def _set_rangeSlider_num_to_label(lines, sliceObj, label_base=""):
"""Use a rangeSlider, to add rangeSlider values to label_base
lines: The lines to set the label of.
y_slice: The rangeSlider to extract values from
label_base: base string of the label that the number is appended to."""
j = 0
for i in range(*sliceObj.indices(sliceObj.stop)):
label = label_base + str(i)
line = lines[j]
line.set_label(label)
j += 1
class IntRangeSliderGap(IntRangeSlider):
    """An IntRangeSlider that enforces a minimum gap between its handles."""

    @validate('value')
    def enforce_gap(self, proposal):
        """Traitlets validator keeping (lo, hi) at least `gap` apart.

        Renamed locals: the originals shadowed the builtins min/max, and
        the unused old upper bound is discarded.
        """
        gap = 1
        new_lo, new_hi = proposal.value
        old_lo, _ = self.value
        # Keep the lower handle off the slider's absolute maximum.
        if new_lo == self.max:
            new_lo -= 1
        if (new_hi - new_lo) < gap:
            if old_lo == new_lo:
                # The upper handle moved; push it back out.
                new_hi = new_lo + gap
            else:
                new_lo = new_hi - gap
        return (new_lo, new_hi)

    @property
    def slice(self):
        """The currently selected range as a slice."""
        return slice(*self.value)
#### End of helper functions
| mit |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/colorbar.py | 1 | 49309 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.artist as martist
import matplotlib.cbook as cbook
import matplotlib.collections as collections
import matplotlib.colors as colors
import matplotlib.contour as contour
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.transforms as mtrans
from matplotlib import docstring
make_axes_kw_doc = '''
============= ====================================================
Property Description
============= ====================================================
*orientation* vertical or horizontal
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
*anchor* (0.0, 0.5) if vertical; (0.5, 1.0) if horizontal;
the anchor point of the colorbar axes
*panchor* (1.0, 0.5) if vertical; (0.5, 0.0) if horizontal;
the anchor point of the colorbar parent axes. If
False, the parent axes' anchor will be unchanged
============= ====================================================
'''
colormap_kw_doc = '''
============ ====================================================
Property Description
============ ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*extendfrac* [ *None* | 'auto' | length | lengths ]
If set to *None*, both the minimum and maximum
triangular colorbar extensions will have a length of
5% of the interior colorbar length (this is the
default setting). If set to 'auto', makes the
triangular colorbar extensions the same lengths as
the interior boxes (when *spacing* is set to
'uniform') or the same lengths as the respective
adjacent interior boxes (when *spacing* is set to
'proportional'). If a scalar, indicates the length
of both the minimum and maximum triangular colorbar
extensions as a fraction of the interior colorbar
length. A two-element sequence of fractions may also
be given, indicating the lengths of the minimum and
maximum colorbar extensions respectively as a
fraction of the interior colorbar length.
*extendrect* [ *False* | *True* ]
If *False* the minimum and maximum colorbar extensions
will be triangular (the default). If *True* the
extensions will be rectangular.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g., '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
============ ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object(s) from which space for a new
colorbar axes will be stolen. If a list of axes is given
they will all be resized to make room for the colorbar axes.
*use_gridspec*
False | If *cax* is None, a new *cax* is created as an instance of
Axes. If *ax* is an instance of Subplot and *use_gridspec* is True,
*cax* is created as an instance of Subplot using the
grid_spec module.
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
It is known that some vector graphics viewers (svg and pdf) render white gaps
between segments of the colorbar. This is due to bugs in the viewers, not
in matplotlib. As a workaround the colorbar can be rendered with overlapping
segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However this has negative consequences in other circumstances. Particularly
with semi transparent images (alpha < 1) and colorbar extensions and is not
enabled by default see (issue #1188).
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
docstring.interpd.update(colorbar_doc=colorbar_doc)
def _set_ticks_on_axis_warn(*args, **kw):
# a top level function which gets put in at the axes'
# set_xticks set_yticks by _patch_ax
warnings.warn("Use the colorbar set_ticks() method instead.")
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` function and the
:meth:`~matplotlib.figure.Figure.colorbar` method, which are the
usual ways of creating a colorbar.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a list of LineCollection if lines were drawn, otherwise
an empty list
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0, None),
'both': slice(1, -1),
'min': slice(1, None),
'max': slice(0, -1)}
def __init__(self, ax, cmap=None,
             norm=None,
             alpha=None,
             values=None,
             boundaries=None,
             orientation='vertical',
             ticklocation='auto',
             extend='neither',
             spacing='uniform',  # uniform or proportional
             ticks=None,
             format=None,
             drawedges=False,
             filled=True,
             extendfrac=None,
             extendrect=False,
             label='',
             ):
    '''Draw a colorbar into *ax*; see the class docstring for the
    meaning of the keyword arguments.'''
    #: The axes that this colorbar lives in.
    self.ax = ax
    self._patch_ax()
    # Fall back to the rc-default colormap and a linear 0-1 norm.
    if cmap is None:
        cmap = cm.get_cmap()
    if norm is None:
        norm = colors.Normalize()
    self.alpha = alpha
    cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
    self.values = values
    self.boundaries = boundaries
    self.extend = extend
    # Slice selecting the interior (non-extension) boundaries.
    self._inside = self._slice_dict[extend]
    self.spacing = spacing
    self.orientation = orientation
    self.drawedges = drawedges
    self.filled = filled
    self.extendfrac = extendfrac
    self.extendrect = extendrect
    # Artists populated later by the drawing methods.
    self.solids = None
    self.lines = list()
    self.outline = None
    self.patch = None
    self.dividers = None
    if ticklocation == 'auto':
        ticklocation = 'bottom' if orientation == 'horizontal' else 'right'
    self.ticklocation = ticklocation
    self.set_label(label)
    if cbook.iterable(ticks):
        self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
    else:
        self.locator = ticks    # Handle default in _ticker()
    # Default formatter: log norms get mathtext labels.
    if format is None:
        if isinstance(self.norm, colors.LogNorm):
            self.formatter = ticker.LogFormatterMathtext()
        else:
            self.formatter = ticker.ScalarFormatter()
    elif cbook.is_string_like(format):
        self.formatter = ticker.FormatStrFormatter(format)
    else:
        self.formatter = format  # Assume it is a Formatter
    # The rest is in a method so we can recalculate when clim changes.
    self.config_axis()
    self.draw_all()
def _extend_lower(self):
"""Returns whether the lower limit is open ended."""
return self.extend in ('both', 'min')
def _extend_upper(self):
"""Returns whether the uper limit is open ended."""
return self.extend in ('both', 'max')
    def _patch_ax(self):
        # bind some methods to the axes to warn users
        # against using those methods.
        # Ticks on a colorbar must go through set_ticks(); replacing the
        # axes' own tick setters with warning stubs prevents silent misuse.
        self.ax.set_xticks = _set_ticks_on_axis_warn
        self.ax.set_yticks = _set_ticks_on_axis_warn
    def draw_all(self):
        '''
        Calculate any free parameters based on the current cmap and norm,
        and do all the drawing.
        '''
        # Derive boundaries/values, the data range, and the mesh, then
        # render the filled color blocks (when *filled* was requested).
        self._process_values()
        self._find_range()
        X, Y = self._mesh()
        # One color value per mesh row, as a column vector for pcolormesh.
        C = self._values[:, np.newaxis]
        self._config_axes(X, Y)
        if self.filled:
            self._add_solids(X, Y, C)
    def config_axis(self):
        """Show ticks only on the long axis, at the configured location."""
        ax = self.ax
        if self.orientation == 'vertical':
            ax.xaxis.set_ticks([])
            # location is either one of 'left' or 'right'
            ax.yaxis.set_label_position(self.ticklocation)
            ax.yaxis.set_ticks_position(self.ticklocation)
        else:
            ax.yaxis.set_ticks([])
            # location is either one of 'bottom' or 'top'
            ax.xaxis.set_label_position(self.ticklocation)
            ax.xaxis.set_ticks_position(self.ticklocation)
        self._set_label()
    def update_ticks(self):
        """
        Force the update of the ticks and ticklabels. This must be
        called whenever the tick locator and/or tick formatter changes.
        """
        ax = self.ax
        # _ticker() converts data values into colorbar coordinates.
        ticks, ticklabels, offset_string = self._ticker()
        if self.orientation == 'vertical':
            ax.yaxis.set_ticks(ticks)
            ax.set_yticklabels(ticklabels)
            ax.yaxis.get_major_formatter().set_offset_string(offset_string)
        else:
            ax.xaxis.set_ticks(ticks)
            ax.set_xticklabels(ticklabels)
            ax.xaxis.get_major_formatter().set_offset_string(offset_string)
    def set_ticks(self, ticks, update_ticks=True):
        """
        set tick locations. Tick locations are updated immediately unless
        update_ticks is *False*. To manually update the ticks, call
        *update_ticks* method explicitly.
        """
        # A sequence becomes a FixedLocator; anything else (e.g. a Locator
        # instance or None) is stored as-is and resolved in _ticker().
        if cbook.iterable(ticks):
            self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
        else:
            self.locator = ticks

        if update_ticks:
            self.update_ticks()
    def set_ticklabels(self, ticklabels, update_ticks=True):
        """
        set tick labels. Tick labels are updated immediately unless
        update_ticks is *False*. To manually update the ticks, call
        *update_ticks* method explicitly.
        """
        # Fixed labels only make sense with fixed tick locations, so
        # set_ticks() must have installed a FixedLocator first.
        if isinstance(self.locator, ticker.FixedLocator):
            self.formatter = ticker.FixedFormatter(ticklabels)
            if update_ticks:
                self.update_ticks()
        else:
            warnings.warn("set_ticks() must have been called.")
    def _config_axes(self, X, Y):
        '''
        Make an axes patch and outline.
        '''
        ax = self.ax
        ax.set_frame_on(False)
        ax.set_navigate(False)
        # Fit the axes limits to the colorbar's bounding polygon.
        xy = self._outline(X, Y)
        ax.update_datalim(xy)
        ax.set_xlim(*ax.dataLim.intervalx)
        ax.set_ylim(*ax.dataLim.intervaly)
        # Replace any previously drawn outline before adding the new one.
        if self.outline is not None:
            self.outline.remove()
        self.outline = mpatches.Polygon(
            xy, edgecolor=mpl.rcParams['axes.edgecolor'],
            facecolor='none',
            linewidth=mpl.rcParams['axes.linewidth'],
            closed=True,
            zorder=2)
        ax.add_artist(self.outline)
        self.outline.set_clip_box(None)
        self.outline.set_clip_path(None)
        # Background patch drawn behind the color blocks (zorder=-1).
        c = mpl.rcParams['axes.facecolor']
        if self.patch is not None:
            self.patch.remove()
        self.patch = mpatches.Polygon(xy, edgecolor=c,
                                      facecolor=c,
                                      linewidth=0.01,
                                      zorder=-1)
        ax.add_artist(self.patch)
        self.update_ticks()
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
    def set_label(self, label, **kw):
        '''
        Label the long axis of the colorbar
        '''
        # Coerce to str and remember the text kwargs so _set_label() can
        # re-apply them whenever the axis is reconfigured.
        self._label = '%s' % (label, )
        self._labelkw = kw
        self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [list(zip(X[i], Y[i])) for i in xrange(1, N - 1)]
else:
return [list(zip(Y[i], X[i])) for i in xrange(1, N - 1)]
    def _add_solids(self, X, Y, C):
        '''
        Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
        optionally add separators.
        '''
        # For horizontal bars, transpose the mesh so pcolormesh draws the
        # color blocks along the x axis instead of the y axis.
        if self.orientation == 'vertical':
            args = (X, Y, C)
        else:
            args = (np.transpose(Y), np.transpose(X), np.transpose(C))
        kw = dict(cmap=self.cmap,
                  norm=self.norm,
                  alpha=self.alpha,
                  edgecolors='None')
        # Save, set, and restore hold state to keep pcolor from
        # clearing the axes. Ordinarily this will not be needed,
        # since the axes object should already have hold set.
        _hold = self.ax.ishold()
        self.ax.hold(True)
        col = self.ax.pcolormesh(*args, **kw)
        self.ax.hold(_hold)
        #self.add_observer(col)  # We should observe, not be observed...

        # Replace any previous mesh and dividers before installing new ones.
        if self.solids is not None:
            self.solids.remove()
        self.solids = col
        if self.dividers is not None:
            self.dividers.remove()
            self.dividers = None
        if self.drawedges:
            linewidths = (0.5 * mpl.rcParams['axes.linewidth'],)
            self.dividers = collections.LineCollection(self._edges(X, Y),
                                                       colors=(mpl.rcParams['axes.edgecolor'],),
                                                       linewidths=linewidths)
            self.ax.add_collection(self.dividers)
    def add_lines(self, levels, colors, linewidths, erase=True):
        '''
        Draw lines on the colorbar.

        *colors* and *linewidths* must be scalars or
        sequences the same length as *levels*.

        Set *erase* to False to add lines without first
        removing any previously added lines.
        '''
        # Convert data levels to colorbar coordinates and drop any that
        # fall (beyond a small tolerance) outside the [0, 1] span.
        y = self._locate(levels)
        igood = (y < 1.001) & (y > -0.001)
        y = y[igood]
        if cbook.iterable(colors):
            colors = np.asarray(colors)[igood]
        if cbook.iterable(linewidths):
            linewidths = np.asarray(linewidths)[igood]
        N = len(y)
        x = np.array([0.0, 1.0])
        X, Y = np.meshgrid(x, y)
        # Each line spans the short axis at its level's coordinate.
        if self.orientation == 'vertical':
            xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
        else:
            xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
        col = collections.LineCollection(xy, linewidths=linewidths)

        if erase and self.lines:
            for lc in self.lines:
                lc.remove()
            self.lines = []
        self.lines.append(col)
        col.set_color(colors)
        self.ax.add_collection(col)
    def _ticker(self):
        '''
        Return the sequence of ticks (colorbar data locations),
        ticklabels (strings), and the corresponding offset string.
        '''
        locator = self.locator
        formatter = self.formatter
        if locator is None:
            # No locator was supplied: pick one matching the norm and
            # boundary configuration.
            if self.boundaries is None:
                if isinstance(self.norm, colors.NoNorm):
                    # Roughly ten index ticks across the discrete values.
                    nv = len(self._values)
                    base = 1 + int(nv / 10)
                    locator = ticker.IndexLocator(base=base, offset=0)
                elif isinstance(self.norm, colors.BoundaryNorm):
                    b = self.norm.boundaries
                    locator = ticker.FixedLocator(b, nbins=10)
                elif isinstance(self.norm, colors.LogNorm):
                    locator = ticker.LogLocator()
                else:
                    locator = ticker.MaxNLocator()
            else:
                b = self._boundaries[self._inside]
                locator = ticker.FixedLocator(b, nbins=10)
        # Feed the locator/formatter a dummy axis spanning the data range.
        if isinstance(self.norm, colors.NoNorm):
            intv = self._values[0], self._values[-1]
        else:
            intv = self.vmin, self.vmax
        locator.create_dummy_axis(minpos=intv[0])
        formatter.create_dummy_axis(minpos=intv[0])
        locator.set_view_interval(*intv)
        locator.set_data_interval(*intv)
        formatter.set_view_interval(*intv)
        formatter.set_data_interval(*intv)

        b = np.array(locator())
        ticks = self._locate(b)
        # Keep only ticks that land (within tolerance) inside the bar.
        inrange = (ticks > -0.001) & (ticks < 1.001)
        ticks = ticks[inrange]
        b = b[inrange]
        formatter.set_locs(b)
        ticklabels = [formatter(t, i) for i, t in enumerate(b)]
        offset_string = formatter.get_offset()
        return ticks, ticklabels, offset_string
    def _process_values(self, b=None):
        '''
        Set the :attr:`_boundaries` and :attr:`_values` attributes
        based on the input boundaries and values.  Input boundaries
        can be *self.boundaries* or the argument *b*.
        '''
        if b is None:
            b = self.boundaries
        if b is not None:
            # Boundaries given: values are either supplied or taken as
            # the midpoints between consecutive boundaries.
            self._boundaries = np.asarray(b, dtype=float)
            if self.values is None:
                self._values = 0.5 * (self._boundaries[:-1]
                                      + self._boundaries[1:])
                if isinstance(self.norm, colors.NoNorm):
                    # NoNorm indexes the colormap directly, so values
                    # must be integers.
                    self._values = (self._values + 0.00001).astype(np.int16)
                return
            self._values = np.array(self.values)
            return
        if self.values is not None:
            # Values given without boundaries: synthesize boundaries
            # around them, extrapolating at both ends.
            self._values = np.array(self.values)
            if self.boundaries is None:
                b = np.zeros(len(self.values) + 1, 'd')
                b[1:-1] = 0.5 * (self._values[:-1] - self._values[1:])
                b[0] = 2.0 * b[1] - b[2]
                b[-1] = 2.0 * b[-2] - b[-3]
                self._boundaries = b
                return
            self._boundaries = np.array(self.boundaries)
            return
        # Neither boundaries nor values are specified;
        # make reasonable ones based on cmap and norm.
        if isinstance(self.norm, colors.NoNorm):
            # One boundary per colormap entry; extended ends get the
            # sentinel values -1 / cmap.N.
            b = self._uniform_y(self.cmap.N + 1) * self.cmap.N - 0.5
            v = np.zeros((len(b) - 1,), dtype=np.int16)
            v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
            if self._extend_lower():
                v[0] = -1
            if self._extend_upper():
                v[-1] = self.cmap.N
            self._boundaries = b
            self._values = v
            return
        elif isinstance(self.norm, colors.BoundaryNorm):
            # Use the norm's own boundaries, padding a unit-wide interval
            # at each extended end.
            b = list(self.norm.boundaries)
            if self._extend_lower():
                b = [b[0] - 1] + b
            if self._extend_upper():
                b = b + [b[-1] + 1]
            b = np.array(b)
            v = np.zeros((len(b) - 1,), dtype=float)
            bi = self.norm.boundaries
            v[self._inside] = 0.5 * (bi[:-1] + bi[1:])
            if self._extend_lower():
                v[0] = b[0] - 1
            if self._extend_upper():
                v[-1] = b[-1] + 1
            self._boundaries = b
            self._values = v
            return
        else:
            # Continuous norm: invert uniformly spaced colorbar
            # coordinates into data space, then recurse with those
            # boundaries to fill in the values.
            self.norm.vmin, self.norm.vmax = mtrans.nonsingular(self.norm.vmin,
                                                                self.norm.vmax,
                                                                expander=0.1)
            if not self.norm.scaled():
                self.norm.vmin = 0
                self.norm.vmax = 1
            b = self.norm.inverse(self._uniform_y(self.cmap.N + 1))
            if self._extend_lower():
                b[0] = b[0] - 1
            if self._extend_upper():
                b[-1] = b[-1] + 1
        self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
    def _get_extension_lengths(self, frac, automin, automax, default=0.05):
        '''
        Get the lengths of colorbar extensions.

        A helper method for _uniform_y and _proportional_y.

        *frac* may be None (use *default*), the string 'auto' (use the
        supplied *automin*/*automax*), or a scalar/pair of fractions.
        Returns a two-element array [lower, upper].
        '''
        # Set the default value.
        extendlength = np.array([default, default])
        if isinstance(frac, six.string_types):
            if frac.lower() == 'auto':
                # Use the provided values when 'auto' is required.
                extendlength[0] = automin
                extendlength[1] = automax
            else:
                # Any other string is invalid.
                raise ValueError('invalid value for extendfrac')
        elif frac is not None:
            try:
                # Try to set min and max extension fractions directly.
                extendlength[:] = frac
                # If frac is a sequence contaning None then NaN may
                # be encountered. This is an error.
                if np.isnan(extendlength).any():
                    raise ValueError()
            except (TypeError, ValueError):
                # Raise an error on encountering an invalid value for frac.
                raise ValueError('invalid value for extendfrac')
        return extendlength
    def _uniform_y(self, N):
        '''
        Return colorbar data coordinates for *N* uniformly
        spaced boundaries, plus ends if required.
        '''
        if self.extend == 'neither':
            y = np.linspace(0, 1, N)
        else:
            # Default extension length equals one interior interval.
            automin = automax = 1. / (N - 1.)
            extendlength = self._get_extension_lengths(self.extendfrac,
                                                       automin, automax,
                                                       default=0.05)
            # Interior coordinates span [0, 1]; extended ends sit just
            # outside that range by the computed extension lengths.
            if self.extend == 'both':
                y = np.zeros(N + 2, 'd')
                y[0] = 0. - extendlength[0]
                y[-1] = 1. + extendlength[1]
            elif self.extend == 'min':
                y = np.zeros(N + 1, 'd')
                y[0] = 0. - extendlength[0]
            else:
                y = np.zeros(N + 1, 'd')
                y[-1] = 1. + extendlength[1]
            y[self._inside] = np.linspace(0, 1, N)
        return y
    def _proportional_y(self):
        '''
        Return colorbar data coordinates for the boundaries of
        a proportional colorbar.
        '''
        # Normalize boundaries into [0, 1], either linearly between the
        # outermost boundaries (BoundaryNorm) or through the norm itself.
        if isinstance(self.norm, colors.BoundaryNorm):
            y = (self._boundaries - self._boundaries[0])
            y = y / (self._boundaries[-1] - self._boundaries[0])
        else:
            y = self.norm(self._boundaries.copy())
        # Auto extension lengths mirror the width of the adjacent
        # interior interval, measured relative to the interior span.
        if self.extend == 'min':
            # Exclude leftmost interval of y.
            clen = y[-1] - y[1]
            automin = (y[2] - y[1]) / clen
            automax = (y[-1] - y[-2]) / clen
        elif self.extend == 'max':
            # Exclude rightmost interval in y.
            clen = y[-2] - y[0]
            automin = (y[1] - y[0]) / clen
            automax = (y[-2] - y[-3]) / clen
        else:
            # Exclude leftmost and rightmost intervals in y.
            clen = y[-2] - y[1]
            automin = (y[2] - y[1]) / clen
            automax = (y[-2] - y[-3]) / clen
        extendlength = self._get_extension_lengths(self.extendfrac,
                                                   automin, automax,
                                                   default=0.05)
        if self.extend in ('both', 'min'):
            y[0] = 0. - extendlength[0]
        if self.extend in ('both', 'max'):
            y[-1] = 1. + extendlength[1]
        # Re-normalize the interior so it spans exactly [0, 1] again.
        yi = y[self._inside]
        norm = colors.Normalize(yi[0], yi[-1])
        y[self._inside] = norm(yi)
        return y
    def _mesh(self):
        '''
        Return X,Y, the coordinate arrays for the colorbar pcolormesh.
        These are suitable for a vertical colorbar; swapping and
        transposition for a horizontal colorbar are done outside
        this function.
        '''
        x = np.array([0.0, 1.0])
        if self.spacing == 'uniform':
            y = self._uniform_y(self._central_N())
        else:
            y = self._proportional_y()
        # Cache the colorbar coordinates for _locate().
        self._y = y
        X, Y = np.meshgrid(x, y)
        # Pinch the outermost rows to the center so extended ends are
        # drawn as triangles rather than rectangles.
        if self._extend_lower() and not self.extendrect:
            X[0, :] = 0.5
        if self._extend_upper() and not self.extendrect:
            X[-1, :] = 0.5
        return X, Y
    def _locate(self, x):
        '''
        Given a set of color data values, return their
        corresponding colorbar data coordinates.
        '''
        if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
            b = self._boundaries
            xn = x
        else:
            # Do calculations using normalized coordinates so
            # as to make the interpolation more accurate.
            b = self.norm(self._boundaries, clip=False).filled()
            xn = self.norm(x, clip=False).filled()

        # The rest is linear interpolation with extrapolation at ends.
        ii = np.searchsorted(b, xn)
        i0 = ii - 1
        # Clamp out-of-range indices so the two bracketing boundaries
        # used below are always valid (this yields extrapolation).
        itop = (ii == len(b))
        ibot = (ii == 0)
        i0[itop] -= 1
        ii[itop] -= 1
        i0[ibot] += 1
        ii[ibot] += 1

        db = np.take(b, ii) - np.take(b, i0)
        y = self._y
        dy = np.take(y, ii) - np.take(y, i0)
        z = np.take(y, i0) + (xn - np.take(b, i0)) * dy / db

        return z
    def set_alpha(self, alpha):
        # Store the new transparency; it takes effect on the next
        # draw_all() (the existing solids are not updated in place).
        self.alpha = alpha
    def remove(self):
        """
        Remove this colorbar from the figure by deleting its axes.
        """
        fig = self.ax.figure
        fig.delaxes(self.ax)
class Colorbar(ColorbarBase):
    """
    This class connects a :class:`ColorbarBase` to a
    :class:`~matplotlib.cm.ScalarMappable` such as a
    :class:`~matplotlib.image.AxesImage` generated via
    :meth:`~matplotlib.axes.Axes.imshow`.

    It is not intended to be instantiated directly; instead,
    use :meth:`~matplotlib.figure.Figure.colorbar` or
    :func:`~matplotlib.pyplot.colorbar` to make your colorbar.
    """
    def __init__(self, ax, mappable, **kw):
        # Ensure the given mappable's norm has appropriate vmin and vmax set
        # even if mappable.draw has not yet been called.
        mappable.autoscale_None()

        self.mappable = mappable
        kw['cmap'] = cmap = mappable.cmap
        kw['norm'] = norm = mappable.norm

        if isinstance(mappable, contour.ContourSet):
            # Contour sets carry their own levels/values/extend settings;
            # mirror them in the colorbar configuration.
            CS = mappable
            kw['alpha'] = mappable.get_alpha()
            kw['boundaries'] = CS._levels
            kw['values'] = CS.cvalues
            kw['extend'] = CS.extend
            #kw['ticks'] = CS._levels
            kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
            kw['filled'] = CS.filled
            ColorbarBase.__init__(self, ax, **kw)
            if not CS.filled:
                self.add_lines(CS)
        else:
            if getattr(cmap, 'colorbar_extend', False) is not False:
                kw.setdefault('extend', cmap.colorbar_extend)

            if isinstance(mappable, martist.Artist):
                kw['alpha'] = mappable.get_alpha()

            ColorbarBase.__init__(self, ax, **kw)

    def on_mappable_changed(self, mappable):
        """
        Updates this colorbar to match the mappable's properties.

        Typically this is automatically registered as an event handler
        by :func:`colorbar_factory` and should not be called manually.
        """
        self.set_cmap(mappable.get_cmap())
        self.set_clim(mappable.get_clim())
        self.update_normal(mappable)

    def add_lines(self, CS, erase=True):
        '''
        Add the lines from a non-filled
        :class:`~matplotlib.contour.ContourSet` to the colorbar.

        Set *erase* to False if these lines should be added to
        any pre-existing lines.
        '''
        if not isinstance(CS, contour.ContourSet) or CS.filled:
            raise ValueError('add_lines is only for a ContourSet of lines')
        # Take the first color/linewidth of each contour level.
        tcolors = [c[0] for c in CS.tcolors]
        tlinewidths = [t[0] for t in CS.tlinewidths]
        # The following was an attempt to get the colorbar lines
        # to follow subsequent changes in the contour lines,
        # but more work is needed: specifically, a careful
        # look at event sequences, and at how
        # to make one object track another automatically.
        #tcolors = [col.get_colors()[0] for col in CS.collections]
        #tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
        #print 'tlinewidths:', tlinewidths
        ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths,
                               erase=erase)

    def update_normal(self, mappable):
        '''
        update solid, lines, etc. Unlike update_bruteforce, it does
        not clear the axes.  This is meant to be called when the image
        or contour plot to which this colorbar belongs is changed.
        '''
        self.draw_all()
        if isinstance(self.mappable, contour.ContourSet):
            CS = self.mappable
            if not CS.filled:
                self.add_lines(CS)

    def update_bruteforce(self, mappable):
        '''
        Destroy and rebuild the colorbar.  This is
        intended to become obsolete, and will probably be
        deprecated and then removed.  It is not called when
        the pyplot.colorbar function or the Figure.colorbar
        method are used to create the colorbar.
        '''
        # We are using an ugly brute-force method: clearing and
        # redrawing the whole thing.  The problem is that if any
        # properties have been changed by methods other than the
        # colorbar methods, those changes will be lost.
        self.ax.cla()
        # clearing the axes will delete outline, patch, solids, and lines:
        self.outline = None
        self.patch = None
        self.solids = None
        self.lines = list()
        self.dividers = None
        self.set_alpha(mappable.get_alpha())
        self.cmap = mappable.cmap
        self.norm = mappable.norm
        self.config_axis()
        self.draw_all()
        if isinstance(self.mappable, contour.ContourSet):
            CS = self.mappable
            if not CS.filled:
                self.add_lines(CS)
            #if self.lines is not None:
            #    tcolors = [c[0] for c in CS.tcolors]
            #    self.lines.set_color(tcolors)
        #Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
        #Fixme: Some refactoring may be needed; we should not
        # be recalculating everything if there was a simple alpha
        # change.

    def remove(self):
        """
        Remove this colorbar from the figure.  If the colorbar was created with
        ``use_gridspec=True`` then restore the gridspec to its previous value.
        """
        # Disconnect from the mappable before removing the axes.
        ColorbarBase.remove(self)
        self.mappable.callbacksSM.disconnect(self.mappable.colorbar_cid)
        self.mappable.colorbar = None
        self.mappable.colorbar_cid = None

        try:
            ax = self.mappable.axes
        except AttributeError:
            return

        # Restore the parent axes' layout to what it was before the
        # colorbar stole space from it.
        try:
            gs = ax.get_subplotspec().get_gridspec()
            subplotspec = gs.get_topmost_subplotspec()
        except AttributeError:
            # use_gridspec was False
            pos = ax.get_position(original=True)
            ax.set_position(pos)
        else:
            # use_gridspec was True
            ax.set_subplotspec(subplotspec)
@docstring.Substitution(make_axes_kw_doc)
def make_axes(parents, location=None, orientation=None, fraction=0.15,
              shrink=1.0, aspect=20, **kw):
    '''
    Resize and reposition parent axes, and return a child
    axes suitable for a colorbar::

        cax, kw = make_axes(parent, **kw)

    Keyword arguments may include the following (with defaults):

        location : [`None`|'left'|'right'|'top'|'bottom']
            The position, relative to **parents**, where the colorbar axes
            should be created. If None, the value will either come from the
            given ``orientation``, else it will default to 'right'.

        orientation :  [`None`|'vertical'|'horizontal']
            The orientation of the colorbar. Typically, this keyword shouldn't
            be used, as it can be derived from the ``location`` keyword.

    %s

    Returns (cax, kw), the child axes and the reduced kw dictionary to be
    passed when creating the colorbar instance.
    '''
    locations = ["left", "right", "top", "bottom"]
    if orientation is not None and location is not None:
        msg = ('position and orientation are mutually exclusive. '
               'Consider setting the position to any of '
               '{0}'.format(', '.join(locations)))
        raise TypeError(msg)

    # provide a default location
    if location is None and orientation is None:
        location = 'right'

    # allow the user to not specify the location by specifying the
    # orientation instead
    if location is None:
        location = 'right' if orientation == 'vertical' else 'bottom'

    if location not in locations:
        raise ValueError('Invalid colorbar location. Must be one '
                         'of %s' % ', '.join(locations))

    # Per-location anchors, padding, and derived orientation.
    default_location_settings = {'left': {'anchor': (1.0, 0.5),
                                          'panchor': (0.0, 0.5),
                                          'pad': 0.10,
                                          'orientation': 'vertical'},
                                 'right': {'anchor': (0.0, 0.5),
                                           'panchor': (1.0, 0.5),
                                           'pad': 0.05,
                                           'orientation': 'vertical'},
                                 'top': {'anchor': (0.5, 0.0),
                                         'panchor': (0.5, 1.0),
                                         'pad': 0.05,
                                         'orientation': 'horizontal'},
                                 'bottom': {'anchor': (0.5, 1.0),
                                            'panchor': (0.5, 0.0),
                                            'pad': 0.15,  # backwards compat
                                            'orientation': 'horizontal'},
                                 }

    loc_settings = default_location_settings[location]

    # put appropriate values into the kw dict for passing back to
    # the Colorbar class
    kw['orientation'] = loc_settings['orientation']
    kw['ticklocation'] = location

    anchor = kw.pop('anchor', loc_settings['anchor'])
    parent_anchor = kw.pop('panchor', loc_settings['panchor'])
    pad = kw.pop('pad', loc_settings['pad'])

    # turn parents into a list if it is not already
    if not isinstance(parents, (list, tuple)):
        parents = [parents]

    fig = parents[0].get_figure()
    if not all(fig is ax.get_figure() for ax in parents):
        raise ValueError('Unable to create a colorbar axes as not all '
                         'parents share the same figure.')

    # take a bounding box around all of the given axes
    parents_bbox = mtrans.Bbox.union([ax.get_position(original=True).frozen()
                                      for ax in parents])

    pb = parents_bbox
    # Split off a strip of width/height *fraction* (plus *pad* of gap)
    # from the requested side for the colorbar; the rest (pb1) is what
    # remains for the parent axes.
    if location in ('left', 'right'):
        if location == 'left':
            pbcb, _, pb1 = pb.splitx(fraction, fraction + pad)
        else:
            pb1, _, pbcb = pb.splitx(1 - fraction - pad, 1 - fraction)
        pbcb = pbcb.shrunk(1.0, shrink).anchored(anchor, pbcb)
    else:
        if location == 'bottom':
            pbcb, _, pb1 = pb.splity(fraction, fraction + pad)
        else:
            pb1, _, pbcb = pb.splity(1 - fraction - pad, 1 - fraction)
        pbcb = pbcb.shrunk(shrink, 1.0).anchored(anchor, pbcb)

        # define the aspect ratio in terms of y's per x rather than x's per y
        aspect = 1.0 / aspect

    # define a transform which takes us from old axes coordinates to
    # new axes coordinates
    shrinking_trans = mtrans.BboxTransform(parents_bbox, pb1)

    # transform each of the axes in parents using the new transform
    for ax in parents:
        new_posn = shrinking_trans.transform(ax.get_position())
        new_posn = mtrans.Bbox(new_posn)
        ax.set_position(new_posn)
        if parent_anchor is not False:
            ax.set_anchor(parent_anchor)

    cax = fig.add_axes(pbcb)
    cax.set_aspect(aspect, anchor=anchor, adjustable='box')
    return cax, kw
@docstring.Substitution(make_axes_kw_doc)
def make_axes_gridspec(parent, **kw):
    '''
    Resize and reposition a parent axes, and return a child axes
    suitable for a colorbar. This function is similar to
    make_axes. Primary differences are

     * *make_axes_gridspec* only handles the *orientation* keyword
       and cannot handle the "location" keyword.

     * *make_axes_gridspec* should only be used with a subplot parent.

     * *make_axes* creates an instance of Axes. *make_axes_gridspec*
        creates an instance of Subplot.

     * *make_axes* updates the position of the
        parent. *make_axes_gridspec* replaces the grid_spec attribute
        of the parent with a new one.

    While this function is meant to be compatible with *make_axes*,
    there could be some minor differences.::

        cax, kw = make_axes_gridspec(parent, **kw)

    Keyword arguments may include the following (with defaults):

        *orientation*
            'vertical' or 'horizontal'

    %s

    All but the first of these are stripped from the input kw set.

    Returns (cax, kw), the child axes and the reduced kw dictionary to be
    passed when creating the colorbar instance.
    '''

    orientation = kw.setdefault('orientation', 'vertical')
    kw['ticklocation'] = 'auto'

    fraction = kw.pop('fraction', 0.15)
    shrink = kw.pop('shrink', 1.0)
    aspect = kw.pop('aspect', 20)

    x1 = 1.0 - fraction

    # for shrinking
    pad_s = (1. - shrink) * 0.5
    wh_ratios = [pad_s, shrink, pad_s]

    # Split the parent's subplotspec into a main cell and a colorbar
    # strip (gs); then split the strip into padding/colorbar/padding
    # (gs2) to implement *shrink*.
    gs_from_subplotspec = gridspec.GridSpecFromSubplotSpec
    if orientation == 'vertical':
        pad = kw.pop('pad', 0.05)
        wh_space = 2 * pad / (1 - pad)

        gs = gs_from_subplotspec(1, 2,
                                 subplot_spec=parent.get_subplotspec(),
                                 wspace=wh_space,
                                 width_ratios=[x1 - pad, fraction]
                                 )

        gs2 = gs_from_subplotspec(3, 1,
                                  subplot_spec=gs[1],
                                  hspace=0.,
                                  height_ratios=wh_ratios,
                                  )

        anchor = (0.0, 0.5)
        panchor = (1.0, 0.5)
    else:
        pad = kw.pop('pad', 0.15)
        wh_space = 2 * pad / (1 - pad)

        gs = gs_from_subplotspec(2, 1,
                                 subplot_spec=parent.get_subplotspec(),
                                 hspace=wh_space,
                                 height_ratios=[x1 - pad, fraction]
                                 )

        gs2 = gs_from_subplotspec(1, 3,
                                  subplot_spec=gs[1],
                                  wspace=0.,
                                  width_ratios=wh_ratios,
                                  )

        aspect = 1.0 / aspect
        anchor = (0.5, 1.0)
        panchor = (0.5, 0.0)

    parent.set_subplotspec(gs[0])
    parent.update_params()
    parent.set_position(parent.figbox)
    parent.set_anchor(panchor)

    fig = parent.get_figure()
    cax = fig.add_subplot(gs2[1])
    cax.set_aspect(aspect, anchor=anchor, adjustable='box')
    return cax, kw
class ColorbarPatch(Colorbar):
    """
    A Colorbar which is created using :class:`~matplotlib.patches.Patch`
    rather than the default :func:`~matplotlib.axes.pcolor`.

    It uses a list of Patch instances instead of a
    :class:`~matplotlib.collections.PatchCollection` because the
    latter does not allow the hatch pattern to vary among the
    members of the collection.
    """
    def __init__(self, ax, mappable, **kw):
        # we do not want to override the behaviour of solids
        # so add a new attribute which will be a list of the
        # colored patches in the colorbar
        self.solids_patches = []
        Colorbar.__init__(self, ax, mappable, **kw)

    def _add_solids(self, X, Y, C):
        """
        Draw the colors using :class:`~matplotlib.patches.Patch`;
        optionally add separators.
        """
        # Save, set, and restore hold state to keep pcolor from
        # clearing the axes. Ordinarily this will not be needed,
        # since the axes object should already have hold set.
        _hold = self.ax.ishold()
        self.ax.hold(True)

        kw = {'alpha': self.alpha, }

        n_segments = len(C)

        # ensure there are sufficent hatches
        hatches = self.mappable.hatches * n_segments

        patches = []
        for i in xrange(len(X) - 1):
            val = C[i][0]
            hatch = hatches[i]

            # Quadrilateral between mesh rows i and i+1.
            xy = np.array([[X[i][0], Y[i][0]],
                           [X[i][1], Y[i][0]],
                           [X[i + 1][1], Y[i + 1][0]],
                           [X[i + 1][0], Y[i + 1][1]]])

            if self.orientation == 'horizontal':
                # if horizontal swap the xs and ys
                xy = xy[..., ::-1]

            patch = mpatches.PathPatch(mpath.Path(xy),
                                       facecolor=self.cmap(self.norm(val)),
                                       hatch=hatch, linewidth=0,
                                       antialiased=False, **kw)
            self.ax.add_patch(patch)
            patches.append(patch)

        # Replace any previously drawn patches and dividers.
        if self.solids_patches:
            for solid in self.solids_patches:
                solid.remove()

        self.solids_patches = patches

        if self.dividers is not None:
            self.dividers.remove()
            self.dividers = None

        if self.drawedges:
            self.dividers = collections.LineCollection(self._edges(X, Y),
                                                       colors=(mpl.rcParams['axes.edgecolor'],),
                                                       linewidths=(0.5 * mpl.rcParams['axes.linewidth'],))
            self.ax.add_collection(self.dividers)

        self.ax.hold(_hold)
def colorbar_factory(cax, mappable, **kwargs):
    """
    Creates a colorbar on the given axes for the given mappable.

    Typically, for automatic colorbar placement given only a mappable use
    :meth:`~matplotlib.figure.Figure.colorbar`.
    """
    # A ContourSet with any hatching needs the patch-based colorbar so the
    # hatch pattern can vary per color block; otherwise the default
    # pcolormesh-based Colorbar is used.
    needs_patches = (isinstance(mappable, contour.ContourSet)
                     and any(hatch is not None for hatch in mappable.hatches))
    cb_class = ColorbarPatch if needs_patches else Colorbar
    cb = cb_class(cax, mappable, **kwargs)

    # Keep the colorbar in sync with the mappable and remember the
    # connection id so remove() can disconnect it later.
    cid = mappable.callbacksSM.connect('changed', cb.on_mappable_changed)
    mappable.colorbar = cb
    mappable.colorbar_cid = cid

    return cb
| mit |
rajat1994/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# Three base classifiers with fixed seeds for reproducibility.
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

# Soft voting averages class probabilities; the random forest is
# weighted 5x relative to the other two classifiers.
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])

# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]

# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]


# plotting

N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# bars for classifier 1-3 (last group left empty for the ensemble)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')

# bars for VotingClassifier (only the last group is populated)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
alshedivat/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 24 | 13826 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
# Optional plotting support: the example degrades gracefully when
# matplotlib is unavailable (HAS_MATPLOTLIB gates the plotting code).
try:
  import matplotlib  # pylint: disable=g-import-not-at-top
  matplotlib.use("TkAgg")  # Need Tk for interactive plots.
  from matplotlib import pyplot  # pylint: disable=g-import-not-at-top
  HAS_MATPLOTLIB = True
except ImportError:
  # Plotting requires matplotlib, but the unit test running this code may
  # execute in an environment without it (i.e. matplotlib is not a build
  # dependency). We'd still like to test the TensorFlow-dependent parts of this
  # example.
  HAS_MATPLOTLIB = False
# Resolve the sample CSV shipped alongside this example module.
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
  """A time series model-building example using an RNNCell."""
  def __init__(self, num_units, num_features, exogenous_feature_columns=None,
               dtype=tf.float32):
    """Initialize/configure the model object.
    Note that we do not start graph building here. Rather, this object is a
    configurable factory for TensorFlow graphs which are run by an Estimator.
    Args:
      num_units: The number of units in the model's LSTMCell.
      num_features: The dimensionality of the time series (features per
        timestep).
      exogenous_feature_columns: A list of `tf.feature_column`s representing
        features which are inputs to the model but are not predicted by
        it. These must then be present for training, evaluation, and
        prediction.
      dtype: The floating point data type to use.
    """
    super(_LSTMModel, self).__init__(
        # Pre-register the metrics we'll be outputting (just a mean here).
        train_output_names=["mean"],
        predict_output_names=["mean"],
        num_features=num_features,
        exogenous_feature_columns=exogenous_feature_columns,
        dtype=dtype)
    self._num_units = num_units  # LSTM hidden-state size (fixed per model).
    # Filled in by initialize_graph()
    self._lstm_cell = None
    self._lstm_cell_run = None
    self._predict_from_lstm_output = None
  def initialize_graph(self, input_statistics=None):
    """Save templates for components, which can then be used repeatedly.
    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch.
    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    with tf.variable_scope("", use_resource=True):
      # Use ResourceVariables to avoid race conditions.
      self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
      # Create templates so we don't have to worry about variable reuse.
      self._lstm_cell_run = tf.make_template(
          name_="lstm_cell",
          func_=self._lstm_cell,
          create_scope_now_=True)
      # Transforms LSTM output into mean predictions.
      self._predict_from_lstm_output = tf.make_template(
          name_="predict_from_lstm_output",
          func_=functools.partial(tf.layers.dense, units=self.num_features),
          create_scope_now_=True)
  def get_start_state(self):
    """Return initial state for the time series model."""
    # State layout: (time, last observation/prediction, exogenous embedding,
    # RNNCell state). The same four-tuple is threaded through every step below.
    return (
        # Keeps track of the time associated with this state for error checking.
        tf.zeros([], dtype=tf.int64),
        # The previous observation or prediction.
        tf.zeros([self.num_features], dtype=self.dtype),
        # The most recently seen exogenous features.
        tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
        # The state of the RNNCell (batch dimension removed since this parent
        # class will broadcast).
        [tf.squeeze(state_element, axis=0)
         for state_element
         in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
  def _filtering_step(self, current_times, current_values, state, predictions):
    """Update model state based on observations.
    Note that we don't do much here aside from computing a loss. In this case
    it's easier to update the RNN state in _prediction_step, since that covers
    running the RNN both on observations (from this method) and our own
    predictions. This distinction can be important for probabilistic models,
    where repeatedly predicting without filtering should lead to low-confidence
    predictions.
    Args:
      current_times: A [batch size] integer Tensor.
      current_values: A [batch size, self.num_features] floating point Tensor
        with new observations.
      state: The model's state tuple.
      predictions: The output of the previous `_prediction_step`.
    Returns:
      A tuple of new state and a predictions dictionary updated to include a
      loss (note that we could also return other measures of goodness of fit,
      although only "loss" will be optimized).
    """
    state_from_time, prediction, exogenous, lstm_state = state
    with tf.control_dependencies(
        [tf.assert_equal(current_times, state_from_time)]):
      # Subtract the mean and divide by the variance of the series. Slightly
      # more efficient if done for a whole window (using the normalize_features
      # argument to SequentialTimeSeriesModel).
      transformed_values = self._scale_data(current_values)
      # Use mean squared error across features for the loss.
      predictions["loss"] = tf.reduce_mean(
          (prediction - transformed_values) ** 2, axis=-1)
      # Keep track of the new observation in model state. It won't be run
      # through the LSTM until the next _imputation_step.
      new_state_tuple = (current_times, transformed_values,
                         exogenous, lstm_state)
    return (new_state_tuple, predictions)
  def _prediction_step(self, current_times, state):
    """Advance the RNN state using a previous observation or prediction."""
    _, previous_observation_or_prediction, exogenous, lstm_state = state
    # Update LSTM state based on the most recent exogenous and endogenous
    # features.
    inputs = tf.concat([previous_observation_or_prediction, exogenous],
                       axis=-1)
    lstm_output, new_lstm_state = self._lstm_cell_run(
        inputs=inputs, state=lstm_state)
    next_prediction = self._predict_from_lstm_output(lstm_output)
    new_state_tuple = (current_times, next_prediction,
                       exogenous, new_lstm_state)
    # Predictions are in the normalized space; scale back to data units.
    return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
  def _imputation_step(self, current_times, state):
    """Advance model state across a gap."""
    # Does not do anything special if we're jumping across a gap. More advanced
    # models, especially probabilistic ones, would want a special case that
    # depends on the gap size.
    return state
  def _exogenous_input_step(
      self, current_times, current_exogenous_regressors, state):
    """Save exogenous regressors in model state for use in _prediction_step."""
    state_from_time, prediction, _, lstm_state = state
    return (state_from_time, prediction,
            current_exogenous_regressors, lstm_state)
def train_and_predict(
    csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
    export_directory=None):
  """Train and predict using a custom time series model.

  Args:
    csv_file_name: Path to a CSV whose rows hold a timestamp, five series
      values, two numeric exogenous values, and one string exogenous value.
    training_steps: Number of Estimator training steps to run.
    estimator_config: Optional RunConfig passed through to the Estimator.
    export_directory: Where to write the SavedModel; a temporary directory is
      created when None.
  Returns:
    A tuple of (evaluation times, observed values, all times including the
    forecast horizon, predicted means over all times).
  """
  # Construct an Estimator from our LSTM model.
  categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
      key="categorical_exogenous_feature", hash_bucket_size=16)
  exogenous_feature_columns = [
      # Exogenous features are not part of the loss, but can inform
      # predictions. In this example the features have no extra information, but
      # are included as an API example.
      tf.feature_column.numeric_column(
          "2d_exogenous_feature", shape=(2,)),
      tf.feature_column.embedding_column(
          categorical_column=categorical_column, dimension=10)]
  estimator = ts_estimators.TimeSeriesRegressor(
      model=_LSTMModel(num_features=5, num_units=128,
                       exogenous_feature_columns=exogenous_feature_columns),
      optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config,
      # Set state to be saved across windows.
      state_manager=state_management.ChainingStateManager())
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
                    + ("2d_exogenous_feature",) * 2
                    + ("categorical_exogenous_feature",)),
      # Data types other than for `times` need to be specified if they aren't
      # float32. In this case one of our exogenous features has string dtype.
      column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=4, window_size=32)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  predict_exogenous_features = {
      "2d_exogenous_feature": numpy.concatenate(
          [numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
          axis=-1),
      "categorical_exogenous_feature": numpy.array(
          ["strkey"] * 100)[None, :, None]}
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=100,
          exogenous_features=predict_exogenous_features)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, :]
  predicted_mean = numpy.squeeze(numpy.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  all_times = numpy.concatenate([times, predictions["times"]], axis=0)
  # Export the model in SavedModel format. We include a bit of extra boilerplate
  # for "cold starting" as if we didn't have any state from the Estimator, which
  # is the case when serving from a SavedModel. If Estimator output is
  # available, the result of "Estimator.evaluate" can be passed directly to
  # `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
  # `continue_from` argument.
  with tf.Graph().as_default():
    filter_feature_tensors, _ = evaluation_input_fn()
    with tf.train.MonitoredSession() as session:
      # Fetch the series to "warm up" our state, which will allow us to make
      # predictions for its future values. This is just a dictionary of times,
      # values, and exogenous features mapping to numpy arrays. The use of an
      # input_fn is just a convenience for the example; they can also be
      # specified manually.
      filter_features = session.run(filter_feature_tensors)
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_savedmodel(
      export_directory, input_receiver_fn)
  # Warm up and predict using the SavedModel
  with tf.Graph().as_default():
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
          signatures=signatures, session=session, features=filter_features)
      saved_model_output = (
          tf.contrib.timeseries.saved_model_utils.predict_continuation(
              continue_from=state, signatures=signatures,
              session=session, steps=100,
              exogenous_features=predict_exogenous_features))
      # The exported model gives the same results as the Estimator.predict()
      # call above.
      numpy.testing.assert_allclose(
          predictions["mean"],
          numpy.squeeze(saved_model_output["mean"], axis=0))
  return times, observed, all_times, predicted_mean
def main(unused_argv):
  """Run the example end to end and plot observed vs. predicted values."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  (times_seen, values_seen,
   times_all, values_predicted) = train_and_predict()
  # Mark the boundary between the evaluated data and the forecast horizon.
  pyplot.axvline(99, linestyle="dotted")
  line_observed = pyplot.plot(
      times_seen, values_seen, label="Observed", color="k")
  line_predicted = pyplot.plot(
      times_all, values_predicted, label="Predicted", color="b")
  pyplot.legend(handles=[line_observed[0], line_predicted[0]],
                loc="upper left")
  pyplot.show()
if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tools/tests/test_merge.py | 1 | 122954 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
import pandas as pd
from pandas.compat import range, lrange, lzip, StringIO
from pandas import compat
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas import Categorical, Timestamp
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
from pandas import (isnull, DataFrame, Index, MultiIndex, Panel,
Series, date_range, read_csv)
import pandas.algos as algos
import pandas.util.testing as tm
from numpy.testing.decorators import slow
a_ = np.array
N = 50
NGROUPS = 8
JOIN_TYPES = ['inner', 'outer', 'left', 'right']


def get_test_data(ngroups=NGROUPS, n=N):
    """Return ``n`` shuffled group labels drawn from ``range(ngroups)``.

    Labels are tiled as evenly as possible; when ``ngroups`` does not divide
    ``n`` evenly, the first ``n % ngroups`` labels each appear one extra time.
    The result is shuffled in place so tests get a random-looking but balanced
    key column.
    """
    # ``list(range(...))`` replaces the py2/3 compat shim ``lrange`` -- the two
    # are equivalent, and the shim has been removed from modern pandas.
    unique_groups = list(range(ngroups))
    arr = np.asarray(np.tile(unique_groups, n // ngroups))
    if len(arr) < n:
        # Pad with the first few labels to reach exactly n elements.
        arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
    random.shuffle(arr)
    return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
    def setUp(self):
        """Build the shared fixture frames used across the merge/join tests."""
        # aggregate multiple columns
        self.df = DataFrame({'key1': get_test_data(),
                             'key2': get_test_data(),
                             'data1': np.random.randn(N),
                             'data2': np.random.randn(N)})
        # exclude a couple keys for fun
        self.df = self.df[self.df['key2'] > 1]
        self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
                              'key2': get_test_data(ngroups=NGROUPS // 2,
                                                    n=N // 5),
                              'value': np.random.randn(N // 5)})
        index, data = tm.getMixedTypeDict()
        self.target = DataFrame(data, index=index)
        # Join on string value
        self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
                                index=data['C'])
        self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                               'v1': np.random.randn(7)})
        self.right = DataFrame({'v2': np.random.randn(4)},
                               index=['d', 'b', 'c', 'a'])
    def test_cython_left_outer_join(self):
        """Exercise algos.left_outer_join against hand-computed indexers."""
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
        max_group = 5
        ls, rs = algos.left_outer_join(left, right, max_group)
        # Expected result: positions sorted stably by group, then expanded to
        # the cross-product of matches per group (-1 marks an unmatched row).
        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')
        exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                     6, 6, 7, 7, 8, 8, 9, 10])
        exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                     4, 5, 4, 5, 4, 5, -1, -1])
        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1
        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1
        self.assert_numpy_array_equal(ls, exp_ls)
        self.assert_numpy_array_equal(rs, exp_rs)
    def test_cython_right_outer_join(self):
        """A right outer join is left_outer_join with the arguments swapped."""
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
        max_group = 5
        rs, ls = algos.left_outer_join(right, left, max_group)
        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')
        # Inline numbers annotate the group label covered by each run of
        # positions in exp_li (group 4 has no match on the left, hence -1).
        # 0 1 1 1
        exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
                     # 2 2 4
                     6, 7, 8, 6, 7, 8, -1])
        exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
                     4, 4, 4, 5, 5, 5, 6])
        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1
        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1
        self.assert_numpy_array_equal(ls, exp_ls)
        self.assert_numpy_array_equal(rs, exp_rs)
    def test_cython_inner_join(self):
        """Exercise algos.inner_join against hand-computed indexers."""
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
        max_group = 5
        ls, rs = algos.inner_join(left, right, max_group)
        # Unlike the outer joins, unmatched groups (here group 4 on the right)
        # simply do not appear in the expected indexers.
        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')
        exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                     6, 6, 7, 7, 8, 8])
        exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                     4, 5, 4, 5, 4, 5])
        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1
        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1
        self.assert_numpy_array_equal(ls, exp_ls)
        self.assert_numpy_array_equal(rs, exp_rs)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key1.bar', joined)
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key2.bar', joined)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
    def test_join_on(self):
        """Exercise DataFrame.join(..., on=key): values, duplicates, missing
        keys, a nonexistent key column, and overlapping column names."""
        target = self.target
        source = self.source
        merged = target.join(source, on='C')
        self.assert_numpy_array_equal(merged['MergedA'], target['A'])
        self.assert_numpy_array_equal(merged['MergedD'], target['D'])
        # join with duplicates (fix regression from DataFrame/Matrix merge)
        df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
        df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
        joined = df.join(df2, on='key')
        expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
                              'value': [0, 0, 1, 1, 2]})
        assert_frame_equal(joined, expected)
        # Test when some are missing
        df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
                         columns=['one'])
        df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
                         columns=['two'])
        df_c = DataFrame([[1], [2]], index=[1, 2],
                         columns=['three'])
        joined = df_a.join(df_b, on='one')
        joined = joined.join(df_c, on='one')
        self.assertTrue(np.isnan(joined['two']['c']))
        self.assertTrue(np.isnan(joined['three']['c']))
        # merge column not present
        self.assertRaises(KeyError, target.join, source, on='E')
        # overlap
        source_copy = source.copy()
        source_copy['A'] = 0
        self.assertRaises(ValueError, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
    def test_join_on_fails_with_wrong_object_type(self):
        """Merging a non-DataFrame raises, with the offending type in the
        message, regardless of which side the bad object is on."""
        # GH12081
        wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
        df = DataFrame({'a': [1, 1]})
        for obj in wrongly_typed:
            with tm.assertRaisesRegexp(ValueError, str(type(obj))):
                merge(obj, df, left_on='a', right_on='a')
            with tm.assertRaisesRegexp(ValueError, str(type(obj))):
                merge(df, obj, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
    def test_join_with_len0(self):
        """Joining against an empty (reindexed-to-nothing) source keeps the
        source columns as all-NaN; an inner join yields zero rows."""
        # nothing to merge
        merged = self.target.join(self.source.reindex([]), on='C')
        for col in self.source:
            self.assertIn(col, merged)
            self.assertTrue(merged[col].isnull().all())
        merged2 = self.target.join(self.source.reindex([]), on='C',
                                   how='inner')
        self.assertTrue(merged2.columns.equals(merged.columns))
        self.assertEqual(len(merged2), 0)
    def test_join_on_inner(self):
        """An inner join-on equals the left join filtered to non-null matches."""
        df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
        df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
        joined = df.join(df2, on='key', how='inner')
        expected = df.join(df2, on='key')
        expected = expected[expected['value'].notnull()]
        self.assert_numpy_array_equal(joined['key'], expected['key'])
        self.assert_numpy_array_equal(joined['value'], expected['value'])
        self.assertTrue(joined.index.equals(expected.index))
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
    def test_join_index_mixed(self):
        """Join frames holding mixed dtypes, both with fully overlapping
        columns (suffixed) and with disjoint column blocks, for every how."""
        df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
                        index=np.arange(10),
                        columns=['A', 'B', 'C', 'D'])
        self.assertEqual(df1['B'].dtype, np.int64)
        self.assertEqual(df1['D'].dtype, np.bool_)
        df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
                        index=np.arange(0, 10, 2),
                        columns=['A', 'B', 'C', 'D'])
        # overlap
        joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
        expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
                            'A_two', 'B_two', 'C_two', 'D_two']
        df1.columns = expected_columns[:4]
        df2.columns = expected_columns[4:]
        expected = _join_by_hand(df1, df2)
        assert_frame_equal(joined, expected)
        # no overlapping blocks
        df1 = DataFrame(index=np.arange(10))
        df1['bool'] = True
        df1['string'] = 'foo'
        df2 = DataFrame(index=np.arange(5, 15))
        df2['int'] = 1
        df2['float'] = 1.
        for kind in JOIN_TYPES:
            joined = df1.join(df2, how=kind)
            expected = _join_by_hand(df1, df2, how=kind)
            assert_frame_equal(joined, expected)
            joined = df2.join(df1, how=kind)
            expected = _join_by_hand(df2, df1, how=kind)
            assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
    def test_join_unconsolidated(self):
        """Smoke test for GH #331: joining an unconsolidated frame (a column
        added after construction) must not raise, in either direction."""
        # GH #331
        a = DataFrame(randn(30, 2), columns=['a', 'b'])
        c = Series(randn(30))
        a['c'] = c
        d = DataFrame(randn(30, 1), columns=['q'])
        # it works!
        a.join(d)
        d.join(a)
    def test_join_multiindex(self):
        """Outer join on MultiIndexed frames matches reindex-then-join and
        preserves the index names, whichever level the inputs are sorted by."""
        index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
                                         [1, 2, 3, 1, 2, 3]],
                                        names=['first', 'second'])
        index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
                                         [1, 2, 3, 1, 2, 3]],
                                        names=['first', 'second'])
        df1 = DataFrame(data=np.random.randn(6), index=index1,
                        columns=['var X'])
        df2 = DataFrame(data=np.random.randn(6), index=index2,
                        columns=['var Y'])
        df1 = df1.sortlevel(0)
        df2 = df2.sortlevel(0)
        joined = df1.join(df2, how='outer')
        ex_index = index1._tuple_index.union(index2._tuple_index)
        expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
        expected.index.names = index1.names
        assert_frame_equal(joined, expected)
        self.assertEqual(joined.index.names, index1.names)
        df1 = df1.sortlevel(1)
        df2 = df2.sortlevel(1)
        joined = df1.join(df2, how='outer').sortlevel(0)
        ex_index = index1._tuple_index.union(index2._tuple_index)
        expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
        expected.index.names = index1.names
        assert_frame_equal(joined, expected)
        self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
self.assertTrue(joined.index.is_monotonic)
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.ix[:, expected.columns])
    def test_join_hierarchical_mixed(self):
        """Merging on index keeps hierarchical (MultiIndex) column labels
        from a grouped aggregation intact in the result."""
        df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
        new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
        other_df = DataFrame(
            [(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
        other_df.set_index('a', inplace=True)
        result = merge(new_df, other_df, left_index=True, right_index=True)
        self.assertTrue(('b', 'mean') in result)
        self.assertTrue('b' in result)
    def test_join_float64_float32(self):
        """Joins and merges must preserve each column's dtype -- no silent
        upcast of float32 columns to float64."""
        a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
        b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
        joined = a.join(b)
        self.assertEqual(joined.dtypes['a'], 'float64')
        self.assertEqual(joined.dtypes['b'], 'float64')
        self.assertEqual(joined.dtypes['c'], 'float32')
        a = np.random.randint(0, 5, 100).astype('int64')
        b = np.random.random(100).astype('float64')
        c = np.random.random(100).astype('float32')
        df = DataFrame({'a': a, 'b': b, 'c': c})
        xpdf = DataFrame({'a': a, 'b': b, 'c': c})
        s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
        rs = df.merge(s, left_on='a', right_index=True)
        self.assertEqual(rs.dtypes['a'], 'int64')
        self.assertEqual(rs.dtypes['b'], 'float64')
        self.assertEqual(rs.dtypes['c'], 'float32')
        self.assertEqual(rs.dtypes['md'], 'float32')
        xp = xpdf.merge(s, left_on='a', right_index=True)
        assert_frame_equal(rs, xp)
    def test_join_many_non_unique_index(self):
        """Joining a list of frames on non-unique MultiIndexes matches the
        equivalent chain of pairwise merges (outer and inner); also covers
        GH 11519 where all join kinds should agree on this data."""
        df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
        df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
        df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
        idf1 = df1.set_index(["a", "b"])
        idf2 = df2.set_index(["a", "b"])
        idf3 = df3.set_index(["a", "b"])
        result = idf1.join([idf2, idf3], how='outer')
        df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
        expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
        result = result.reset_index()
        result['a'] = result['a'].astype(np.float64)
        result['b'] = result['b'].astype(np.float64)
        assert_frame_equal(result, expected.ix[:, result.columns])
        df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
        df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
        df3 = DataFrame(
            {"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
        idf1 = df1.set_index(["a", "b"])
        idf2 = df2.set_index(["a", "b"])
        idf3 = df3.set_index(["a", "b"])
        result = idf1.join([idf2, idf3], how='inner')
        df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
        expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
        result = result.reset_index()
        assert_frame_equal(result, expected.ix[:, result.columns])
        # GH 11519
        df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                              'foo', 'bar', 'foo', 'foo'],
                        'B': ['one', 'one', 'two', 'three',
                              'two', 'two', 'one', 'three'],
                        'C': np.random.randn(8),
                        'D': np.random.randn(8)})
        s = Series(np.repeat(np.arange(8), 2),
                   index=np.repeat(np.arange(8), 2), name='TEST')
        inner = df.join(s, how='inner')
        outer = df.join(s, how='outer')
        left = df.join(s, how='left')
        right = df.join(s, how='right')
        assert_frame_equal(inner, outer)
        assert_frame_equal(inner, left)
        assert_frame_equal(inner, right)
    def test_merge_index_singlekey_right_vs_left(self):
        """A key-vs-index merge and its mirror-image call produce the same
        frame (modulo column order), with and without sorting."""
        left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                          'v1': np.random.randn(7)})
        right = DataFrame({'v2': np.random.randn(4)},
                          index=['d', 'b', 'c', 'a'])
        merged1 = merge(left, right, left_on='key',
                        right_index=True, how='left', sort=False)
        merged2 = merge(right, left, right_on='key',
                        left_index=True, how='right', sort=False)
        assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
        merged1 = merge(left, right, left_on='key',
                        right_index=True, how='left', sort=True)
        merged2 = merge(right, left, right_on='key',
                        left_index=True, how='right', sort=True)
        assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
    def test_merge_index_singlekey_inner(self):
        """An inner key-vs-index merge matches join-on restricted to the
        matching rows, from either side of the call."""
        left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
                          'v1': np.random.randn(7)})
        right = DataFrame({'v2': np.random.randn(4)},
                          index=['d', 'b', 'c', 'a'])
        # inner join
        result = merge(left, right, left_on='key', right_index=True,
                       how='inner')
        expected = left.join(right, on='key').ix[result.index]
        assert_frame_equal(result, expected)
        result = merge(right, left, right_on='key', left_index=True,
                       how='inner')
        expected = left.join(right, on='key').ix[result.index]
        assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
self.assertRaises(ValueError, merge, self.left, self.right,
left_index=True)
self.assertRaises(ValueError, merge, self.left, self.right,
right_index=True)
self.assertRaises(ValueError, merge, self.left, self.left,
left_on='key', on='key')
self.assertRaises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
self.assertEqual(len(merged), exp_len)
self.assertIn('v1_x', merged)
self.assertIn('v1_y', merged)
    def test_merge_different_column_key_names(self):
        """A sorted outer merge with distinct key names keeps both key columns
        (NaN where unmatched) and suffixes the overlapping value columns."""
        left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
                          'value': [1, 2, 3, 4]})
        right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
                           'value': [5, 6, 7, 8]})
        merged = left.merge(right, left_on='lkey', right_on='rkey',
                            how='outer', sort=True)
        exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],
                        name='lkey')
        tm.assert_series_equal(merged['lkey'], exp)
        exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],
                        name='rkey')
        tm.assert_series_equal(merged['rkey'], exp)
        exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')
        tm.assert_series_equal(merged['value_x'], exp)
        exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')
        tm.assert_series_equal(merged['value_y'], exp)
    def test_merge_copy(self):
        """With copy=True, mutating the merged result must not write through
        to either input frame."""
        left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
        right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
        merged = merge(left, right, left_index=True,
                       right_index=True, copy=True)
        merged['a'] = 6
        self.assertTrue((left['a'] == 0).all())
        merged['d'] = 'peekaboo'
        self.assertTrue((right['d'] == 'bar').all())
    def test_merge_nocopy(self):
        """With copy=False, the merged result shares data with the inputs, so
        mutations are visible in the original frames."""
        left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
        right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
        merged = merge(left, right, left_index=True,
                       right_index=True, copy=False)
        merged['a'] = 6
        self.assertTrue((left['a'] == 6).all())
        merged['d'] = 'peekaboo'
        self.assertTrue((right['d'] == 'peekaboo').all())
    def test_join_sort(self):
        """join(..., sort=True) orders the result by key; sort=False keeps the
        left frame's original row order."""
        left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
                          'value': [1, 2, 3, 4]})
        right = DataFrame({'value2': ['a', 'b', 'c']},
                          index=['bar', 'baz', 'foo'])
        joined = left.join(right, on='key', sort=True)
        expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
                              'value': [2, 3, 1, 4],
                              'value2': ['a', 'b', 'c', 'c']},
                             index=[1, 2, 0, 3])
        assert_frame_equal(joined, expected)
        # smoke test
        joined = left.join(right, on='key', sort=False)
        self.assert_numpy_array_equal(joined.index, lrange(4))
    def test_intelligently_handle_join_key(self):
        """An outer merge on a shared key produces the expected rows and a
        consolidated result frame (GH #733)."""
        # #733, be a bit more 1337 about not returning unconsolidated DataFrame
        left = DataFrame({'key': [1, 1, 2, 2, 3],
                          'value': lrange(5)}, columns=['value', 'key'])
        right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
                           'rvalue': lrange(6)})
        joined = merge(left, right, on='key', how='outer')
        expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],
                              'value': np.array([0, 0, 1, 1, 2, 3, 4,
                                                 np.nan, np.nan]),
                              'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},
                             columns=['value', 'key', 'rvalue'])
        assert_frame_equal(joined, expected, check_dtype=False)
        self.assertTrue(joined._data.is_consolidated())
def test_handle_join_key_pass_array(self):
    """Join keys may be passed as raw ndarrays instead of column names."""
    left = DataFrame({'key': [1, 1, 2, 2, 3],
                      'value': lrange(5)}, columns=['value', 'key'])
    right = DataFrame({'rvalue': lrange(6)})
    key = np.array([1, 1, 2, 3, 4, 5])

    merged = merge(left, right, left_on='key', right_on=key, how='outer')
    merged2 = merge(right, left, left_on=key, right_on='key', how='outer')

    assert_series_equal(merged['key'], merged2['key'])
    self.assertTrue(merged['key'].notnull().all())
    self.assertTrue(merged2['key'].notnull().all())

    left = DataFrame({'value': lrange(5)}, columns=['value'])
    right = DataFrame({'rvalue': lrange(6)})
    lkey = np.array([1, 1, 2, 2, 3])
    rkey = np.array([1, 1, 2, 3, 4, 5])

    # With only array keys, pandas synthesizes a 'key_0' column.
    merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
    self.assert_numpy_array_equal(merged['key_0'],
                                  np.array([1, 1, 1, 1, 2, 2, 3, 4, 5]))

    left = DataFrame({'value': lrange(3)})
    right = DataFrame({'rvalue': lrange(6)})

    # Mixing left_index with an array right_on also yields 'key_0'.
    key = np.array([0, 1, 1, 2, 2, 3])
    merged = merge(left, right, left_index=True, right_on=key, how='outer')
    self.assert_numpy_array_equal(merged['key_0'], key)
def test_mixed_type_join_with_suffix(self):
    """Joining mixed-dtype frames with an rsuffix must not error (GH #916)."""
    frame = DataFrame(np.random.randn(20, 6),
                      columns=['a', 'b', 'c', 'd', 'e', 'f'])
    frame.insert(0, 'id', 0)
    frame.insert(5, 'dt', 'foo')

    grouped = frame.groupby('id')

    # it works!
    grouped.mean().join(grouped.count(), rsuffix='_right')
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
    """Index joins with duplicate index values, monotonic and not."""
    dt = datetime(2012, 5, 1)
    dt2 = datetime(2012, 5, 2)
    dt3 = datetime(2012, 5, 3)
    dt4 = datetime(2012, 5, 4)

    # Unique left vs duplicated right.
    df1 = DataFrame({'x': ['a']}, index=[dt])
    df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
    _check_merge(df1, df2)

    # Not monotonic
    df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
    df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
                    index=[dt3, dt3, dt2, dt2, dt, dt])
    _check_merge(df1, df2)

    # Duplicates on both sides.
    df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
    df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
    _check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
    """Index joins where both sides carry duplicate labels (m:n match)."""
    d1, d2, d3 = (datetime(2012, 5, day) for day in (1, 2, 3))

    left = DataFrame({'x': ['a', 'b', 'c', 'd']},
                     index=[d2, d2, d1, d1])
    right = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
                      index=[d2, d2, d3, d1, d1])

    # _check_merge cross-checks join-on-index against an explicit merge.
    _check_merge(left, right)
def test_left_merge_empty_dataframe(self):
    """A left join against an empty right frame returns the left frame."""
    left = DataFrame({'key': [1], 'value': [2]})
    right = DataFrame({'key': []})

    # Left join keeps every left row; the empty right contributes nothing.
    assert_frame_equal(merge(left, right, on='key', how='left'), left)

    # Mirror image: a right join with the operands swapped.
    assert_frame_equal(merge(right, left, on='key', how='right'), left)
def test_merge_left_empty_right_empty(self):
    """Merging two empty frames is empty for every how= and key spec."""
    # GH 10824
    left = pd.DataFrame([], columns=['a', 'b', 'c'])
    right = pd.DataFrame([], columns=['x', 'y', 'z'])

    exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
                          index=pd.Index([], dtype=object),
                          dtype=object)

    # Exercise every combination of index/column join keys.
    for kwarg in [dict(left_index=True, right_index=True),
                  dict(left_index=True, right_on='x'),
                  dict(left_on='a', right_index=True),
                  dict(left_on='a', right_on='x')]:

        result = pd.merge(left, right, how='inner', **kwarg)
        tm.assert_frame_equal(result, exp_in)
        result = pd.merge(left, right, how='left', **kwarg)
        tm.assert_frame_equal(result, exp_in)
        result = pd.merge(left, right, how='right', **kwarg)
        tm.assert_frame_equal(result, exp_in)
        result = pd.merge(left, right, how='outer', **kwarg)
        tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
    """Empty left: inner/left are empty; right/outer keep right's rows."""
    # GH 10824
    left = pd.DataFrame([], columns=['a', 'b', 'c'])
    right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         columns=['x', 'y', 'z'])

    exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),
                            'b': np.array([np.nan] * 3, dtype=object),
                            'c': np.array([np.nan] * 3, dtype=object),
                            'x': [1, 4, 7],
                            'y': [2, 5, 8],
                            'z': [3, 6, 9]},
                           columns=['a', 'b', 'c', 'x', 'y', 'z'])
    exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype
    # result will have object dtype
    exp_in.index = exp_in.index.astype(object)

    for kwarg in [dict(left_index=True, right_index=True),
                  dict(left_index=True, right_on='x'),
                  dict(left_on='a', right_index=True),
                  dict(left_on='a', right_on='x')]:

        # inner/left: nothing on the left to match, so empty.
        result = pd.merge(left, right, how='inner', **kwarg)
        tm.assert_frame_equal(result, exp_in)
        result = pd.merge(left, right, how='left', **kwarg)
        tm.assert_frame_equal(result, exp_in)

        # right/outer: all right rows survive with NaN left columns.
        result = pd.merge(left, right, how='right', **kwarg)
        tm.assert_frame_equal(result, exp_out)
        result = pd.merge(left, right, how='outer', **kwarg)
        tm.assert_frame_equal(result, exp_out)
def test_merge_left_notempty_right_empty(self):
    """Empty right: inner/right are empty; left/outer keep left's rows."""
    # GH 10824
    left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                        columns=['a', 'b', 'c'])
    right = pd.DataFrame([], columns=['x', 'y', 'z'])

    exp_out = pd.DataFrame({'a': [1, 4, 7],
                            'b': [2, 5, 8],
                            'c': [3, 6, 9],
                            'x': np.array([np.nan] * 3, dtype=object),
                            'y': np.array([np.nan] * 3, dtype=object),
                            'z': np.array([np.nan] * 3, dtype=object)},
                           columns=['a', 'b', 'c', 'x', 'y', 'z'])
    exp_in = exp_out[0:0]  # make empty DataFrame keeping dtype
    # result will have object dtype
    exp_in.index = exp_in.index.astype(object)

    for kwarg in [dict(left_index=True, right_index=True),
                  dict(left_index=True, right_on='x'),
                  dict(left_on='a', right_index=True),
                  dict(left_on='a', right_on='x')]:

        # inner/right: nothing on the right to match, so empty.
        result = pd.merge(left, right, how='inner', **kwarg)
        tm.assert_frame_equal(result, exp_in)
        result = pd.merge(left, right, how='right', **kwarg)
        tm.assert_frame_equal(result, exp_in)

        # left/outer: all left rows survive with NaN right columns.
        result = pd.merge(left, right, how='left', **kwarg)
        tm.assert_frame_equal(result, exp_out)
        result = pd.merge(left, right, how='outer', **kwarg)
        tm.assert_frame_equal(result, exp_out)
def test_merge_nosort(self):
    """merge(sort=False) preserves the left frame's key order."""
    # #2098, anything to do?

    from datetime import datetime

    d = {"var1": np.random.randint(0, 10, size=10),
         "var2": np.random.randint(0, 10, size=10),
         "var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
                  datetime(2010, 2, 3), datetime(2012, 1, 12),
                  datetime(2011, 2, 4), datetime(2012, 4, 3),
                  datetime(2012, 3, 4), datetime(2008, 5, 1),
                  datetime(2010, 2, 3), datetime(2012, 2, 3)]}
    df = DataFrame.from_dict(d)
    var3 = df.var3.unique()
    var3.sort()
    new = DataFrame.from_dict({"var3": var3,
                               "var8": np.random.random(7)})

    # Method form and function form must agree.
    result = df.merge(new, on="var3", sort=False)
    exp = merge(df, new, on='var3', sort=False)
    assert_frame_equal(result, exp)

    # Unsorted merge must keep the original key order.
    self.assertTrue((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
    """join(on=...) where the right frame matches only some left keys."""
    df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
    df2 = DataFrame({"i1": [0], "i3": [0]})
    result = df1.join(df2, on="i1", rsuffix="_")
    # Expected built dict-of-dicts style; the None column round-trip
    # (set_index/reset_index) reproduces the merge's row ordering.
    expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
                           'i1_': {0: 0, 1: np.nan},
                           'i3': {0: 0.0, 1: np.nan},
                           None: {0: 0, 1: 0}})
                .set_index(None)
                .reset_index()[['i1', 'i2', 'i1_', 'i3']])
    assert_frame_equal(result, expected, check_dtype=False)

    df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
    df2 = DataFrame({"i1": [0], "i3": [0.7]})
    result = df1.join(df2, rsuffix="_", on='i1')
    expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
                           'i2': {0: 0.5, 1: 1.5},
                           'i3': {0: 0.69999999999999996,
                                  1: nan}})
                [['i1', 'i2', 'i1_', 'i3']])
    assert_frame_equal(result, expected)
def test_merge_type(self):
    """merge on a DataFrame subclass returns that subclass, not DataFrame."""
    class SubFrame(DataFrame):
        # pandas builds results through _constructor, so merges on a
        # SubFrame should come back as SubFrame.
        @property
        def _constructor(self):
            return SubFrame

    result = SubFrame(self.df).merge(self.df2, on='key1')
    tm.assertIsInstance(result, SubFrame)
def test_append_dtype_coerce(self):
    """Appending frames with disjoint datetime columns fills NaT (GH 4993)."""
    # GH 4993
    # appending with datetime will incorrectly convert datetime64
    import datetime as dt
    from pandas import NaT

    df1 = DataFrame(index=[1, 2], data=[dt.datetime(2013, 1, 1, 0, 0),
                                        dt.datetime(2013, 1, 2, 0, 0)],
                    columns=['start_time'])
    df2 = DataFrame(index=[4, 5], data=[[dt.datetime(2013, 1, 3, 0, 0),
                                         dt.datetime(2013, 1, 3, 6, 10)],
                                        [dt.datetime(2013, 1, 4, 0, 0),
                                         dt.datetime(2013, 1, 4, 7, 10)]],
                    columns=['start_time', 'end_time'])

    # end_time missing from df1 -> NaT, and the dtype stays datetime64.
    expected = concat([Series([NaT, NaT, dt.datetime(2013, 1, 3, 6, 10),
                               dt.datetime(2013, 1, 4, 7, 10)],
                              name='end_time'),
                       Series([dt.datetime(2013, 1, 1, 0, 0),
                               dt.datetime(2013, 1, 2, 0, 0),
                               dt.datetime(2013, 1, 3, 0, 0),
                               dt.datetime(2013, 1, 4, 0, 0)],
                              name='start_time')], axis=1)
    result = df1.append(df2, ignore_index=True)
    assert_frame_equal(result, expected)
def test_join_append_timedeltas(self):
    """timedelta64 columns survive append and left join (GH 5695)."""
    import datetime as dt
    from pandas import NaT

    # timedelta64 issues with join/merge
    # GH 5695

    d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
    df = DataFrame(columns=list('dt'))
    df = df.append(d, ignore_index=True)
    result = df.append(d, ignore_index=True)
    expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
                                dt.datetime(2013, 11, 5, 5, 56)],
                          't': [dt.timedelta(0, 22500),
                                dt.timedelta(0, 22500)]})
    assert_frame_equal(result, expected)

    td = np.timedelta64(300000000)
    lhs = DataFrame(Series([td, td], index=["A", "B"]))
    rhs = DataFrame(Series([td], index=["A"]))

    # Unmatched left row 'B' gets NaT in the suffixed right column.
    result = lhs.join(rhs, rsuffix='r', how="left")
    expected = DataFrame({'0': Series([td, td], index=list('AB')),
                          '0r': Series([td, NaT], index=list('AB'))})
    assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
    """Duplicate non-key columns merge fine; duplicate key names raise."""
    df = DataFrame({'key': [1, 2, 3],
                    'v1': [4, 5, 6],
                    'v2': [7, 8, 9]})
    df2 = DataFrame({'key': [1, 2, 3],
                     'v1': [4, 5, 6],
                     'v2': [7, 8, 9]})

    df.columns = ['key', 'foo', 'foo']
    df2.columns = ['key', 'bar', 'bar']
    expected = DataFrame({'key': [1, 2, 3],
                          'v1': [4, 5, 6],
                          'v2': [7, 8, 9],
                          'v3': [4, 5, 6],
                          'v4': [7, 8, 9]})
    expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
    assert_frame_equal(merge(df, df2), expected)

    # #2649, #10639
    # Overlapping duplicated key-ish names are ambiguous -> ValueError.
    df2.columns = ['key1', 'foo', 'foo']
    self.assertRaises(ValueError, merge, df, df2)
def test_merge_on_datetime64tz(self):
    """Outer merges on and with tz-aware datetime64 columns (GH11405)."""
    # GH11405
    left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,
                                              tz='US/Eastern'),
                         'value': [1, 2]})
    right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,
                                               tz='US/Eastern'),
                          'value': [1, 2, 3]})

    # tz-aware column as the join key.
    expected = DataFrame({'key': pd.date_range('20151010', periods=4,
                                               tz='US/Eastern'),
                          'value_x': [1, 2, np.nan, np.nan],
                          'value_y': [np.nan, 1, 2, 3]})
    result = pd.merge(left, right, on='key', how='outer')
    assert_frame_equal(result, expected)

    # tz-aware column as a data (non-key) column; gaps become NaT.
    left = pd.DataFrame({'value': pd.date_range('20151010', periods=2,
                                                tz='US/Eastern'),
                         'key': [1, 2]})
    right = pd.DataFrame({'value': pd.date_range('20151011', periods=2,
                                                 tz='US/Eastern'),
                          'key': [2, 3]})
    expected = DataFrame({
        'value_x': list(pd.date_range('20151010', periods=2,
                                      tz='US/Eastern')) + [pd.NaT],
        'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,
                                                 tz='US/Eastern')),
        'key': [1., 2, 3]})
    result = pd.merge(left, right, on='key', how='outer')
    assert_frame_equal(result, expected)
def test_merge_on_periods(self):
    """Outer merges on and with Period-dtype columns."""
    # Period column as the join key.
    left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,
                                                freq='D'),
                         'value': [1, 2]})
    right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,
                                                 freq='D'),
                          'value': [1, 2, 3]})

    expected = DataFrame({'key': pd.period_range('20151010', periods=4,
                                                 freq='D'),
                          'value_x': [1, 2, np.nan, np.nan],
                          'value_y': [np.nan, 1, 2, 3]})
    result = pd.merge(left, right, on='key', how='outer')
    assert_frame_equal(result, expected)

    # Period column as a data (non-key) column; gaps become NaT.
    left = pd.DataFrame({'value': pd.period_range('20151010', periods=2,
                                                  freq='D'),
                         'key': [1, 2]})
    right = pd.DataFrame({'value': pd.period_range('20151011', periods=2,
                                                   freq='D'),
                          'key': [2, 3]})

    exp_x = pd.period_range('20151010', periods=2, freq='D')
    exp_y = pd.period_range('20151011', periods=2, freq='D')
    expected = DataFrame({'value_x': list(exp_x) + [pd.NaT],
                          'value_y': [pd.NaT] + list(exp_y),
                          'key': [1., 2, 3]})
    result = pd.merge(left, right, on='key', how='outer')
    assert_frame_equal(result, expected)
def test_concat_NaT_series(self):
    """Concatenating all-NaT series with datetime series (GH 11693)."""
    # GH 11693
    # test for merging NaT series with datetime series.
    x = Series(date_range('20151124 08:00', '20151124 09:00',
                          freq='1h', tz='US/Eastern'))
    y = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
    expected = Series([x[0], x[1], pd.NaT, pd.NaT])

    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # all NaT with tz
    expected = Series(pd.NaT, index=range(4),
                      dtype='datetime64[ns, US/Eastern]')
    result = pd.concat([y, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # without tz
    x = pd.Series(pd.date_range('20151124 08:00',
                                '20151124 09:00', freq='1h'))
    y = pd.Series(pd.date_range('20151124 10:00',
                                '20151124 11:00', freq='1h'))
    y[:] = pd.NaT
    expected = pd.Series([x[0], x[1], pd.NaT, pd.NaT])
    result = pd.concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # all NaT without tz
    x[:] = pd.NaT
    expected = pd.Series(pd.NaT, index=range(4),
                         dtype='datetime64[ns]')
    result = pd.concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)
def test_concat_tz_series(self):
    """concat of tz-aware series with naive/object/same-tz data."""
    # GH 11755
    # tz and no tz
    # Mixing tz-aware with naive falls back to object dtype.
    x = Series(date_range('20151124 08:00',
                          '20151124 09:00',
                          freq='1h', tz='UTC'))
    y = Series(date_range('2012-01-01', '2012-01-02'))
    expected = Series([x[0], x[1], y[0], y[1]],
                      dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # GH 11887
    # concat tz and object
    x = Series(date_range('20151124 08:00',
                          '20151124 09:00',
                          freq='1h', tz='UTC'))
    y = Series(['a', 'b'])
    expected = Series([x[0], x[1], y[0], y[1]],
                      dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # 12217
    # 12306 fixed I think
    # Same timezone on both sides must keep the tz-aware dtype.

    # Concat'ing two UTC times
    first = pd.DataFrame([[datetime(2016, 1, 1)]])
    first[0] = first[0].dt.tz_localize('UTC')

    second = pd.DataFrame([[datetime(2016, 1, 2)]])
    second[0] = second[0].dt.tz_localize('UTC')

    result = pd.concat([first, second])
    self.assertEqual(result[0].dtype, 'datetime64[ns, UTC]')

    # Concat'ing two London times
    first = pd.DataFrame([[datetime(2016, 1, 1)]])
    first[0] = first[0].dt.tz_localize('Europe/London')

    second = pd.DataFrame([[datetime(2016, 1, 2)]])
    second[0] = second[0].dt.tz_localize('Europe/London')

    result = pd.concat([first, second])
    self.assertEqual(result[0].dtype, 'datetime64[ns, Europe/London]')

    # Concat'ing 2+1 London times
    first = pd.DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])
    first[0] = first[0].dt.tz_localize('Europe/London')

    second = pd.DataFrame([[datetime(2016, 1, 3)]])
    second[0] = second[0].dt.tz_localize('Europe/London')

    result = pd.concat([first, second])
    self.assertEqual(result[0].dtype, 'datetime64[ns, Europe/London]')

    # Concat'ing 1+2 London times
    first = pd.DataFrame([[datetime(2016, 1, 1)]])
    first[0] = first[0].dt.tz_localize('Europe/London')

    second = pd.DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])
    second[0] = second[0].dt.tz_localize('Europe/London')

    result = pd.concat([first, second])
    self.assertEqual(result[0].dtype, 'datetime64[ns, Europe/London]')
def test_concat_period_series(self):
    """concat of Period series: same freq, mixed freq, and non-period."""
    # Same daily freq on both sides.
    x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
    y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='D'))
    expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # different freq
    x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
    y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='M'))
    expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
    y = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='M'))
    expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    # non-period
    x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
    y = Series(pd.DatetimeIndex(['2015-11-01', '2015-12-01']))
    expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)

    x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
    y = Series(['A', 'B'])
    expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
    result = concat([x, y], ignore_index=True)
    tm.assert_series_equal(result, expected)
def test_indicator(self):
    """merge(indicator=...) adds a categorical row-provenance column."""
    # PR #10054. xref #7412 and closes #8790.
    df1 = DataFrame({'col1': [0, 1], 'col_left': [
        'a', 'b'], 'col_conflict': [1, 2]})
    df1_copy = df1.copy()

    df2 = DataFrame({'col1': [1, 2, 3, 4, 5], 'col_right': [2, 2, 2, 2, 2],
                     'col_conflict': [1, 2, 3, 4, 5]})
    df2_copy = df2.copy()

    df_result = DataFrame({
        'col1': [0, 1, 2, 3, 4, 5],
        'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],
        'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],
        'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],
        'col_right': [np.nan, 2, 2, 2, 2, 2]}, dtype='float64')
    # _merge is categorical with the fixed three-value category set.
    df_result['_merge'] = Categorical(
        ['left_only', 'both', 'right_only',
         'right_only', 'right_only', 'right_only'],
        categories=['left_only', 'right_only', 'both'])

    df_result = df_result[['col1', 'col_conflict_x', 'col_left',
                           'col_conflict_y', 'col_right', '_merge']]

    test = merge(df1, df2, on='col1', how='outer', indicator=True)
    assert_frame_equal(test, df_result)
    test = df1.merge(df2, on='col1', how='outer', indicator=True)
    assert_frame_equal(test, df_result)

    # No side effects
    assert_frame_equal(df1, df1_copy)
    assert_frame_equal(df2, df2_copy)

    # Check with custom name
    df_result_custom_name = df_result
    df_result_custom_name = df_result_custom_name.rename(
        columns={'_merge': 'custom_name'})

    test_custom_name = merge(
        df1, df2, on='col1', how='outer', indicator='custom_name')
    assert_frame_equal(test_custom_name, df_result_custom_name)
    test_custom_name = df1.merge(
        df2, on='col1', how='outer', indicator='custom_name')
    assert_frame_equal(test_custom_name, df_result_custom_name)

    # Check only accepts strings and booleans
    with tm.assertRaises(ValueError):
        merge(df1, df2, on='col1', how='outer', indicator=5)
    with tm.assertRaises(ValueError):
        df1.merge(df2, on='col1', how='outer', indicator=5)

    # Check result integrity
    test2 = merge(df1, df2, on='col1', how='left', indicator=True)
    self.assertTrue((test2._merge != 'right_only').all())
    test2 = df1.merge(df2, on='col1', how='left', indicator=True)
    self.assertTrue((test2._merge != 'right_only').all())

    test3 = merge(df1, df2, on='col1', how='right', indicator=True)
    self.assertTrue((test3._merge != 'left_only').all())
    test3 = df1.merge(df2, on='col1', how='right', indicator=True)
    self.assertTrue((test3._merge != 'left_only').all())

    test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
    self.assertTrue((test4._merge == 'both').all())
    test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
    self.assertTrue((test4._merge == 'both').all())

    # Check if working name in df
    for i in ['_right_indicator', '_left_indicator', '_merge']:
        df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})

        with tm.assertRaises(ValueError):
            merge(df1, df_badcolumn, on='col1',
                  how='outer', indicator=True)
        with tm.assertRaises(ValueError):
            df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)

    # Check for name conflict with custom name
    df_badcolumn = DataFrame(
        {'col1': [1, 2], 'custom_column_name': [2, 2]})

    with tm.assertRaises(ValueError):
        merge(df1, df_badcolumn, on='col1', how='outer',
              indicator='custom_column_name')
    with tm.assertRaises(ValueError):
        df1.merge(df_badcolumn, on='col1', how='outer',
                  indicator='custom_column_name')

    # Merge on multiple columns
    df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})

    df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})

    hand_coded_result = DataFrame({'col1': [0, 1, 1, 3.0],
                                   'col2': ['a', 'b', 'x', 'y']})
    hand_coded_result['_merge'] = Categorical(
        ['left_only', 'both', 'right_only', 'right_only'],
        categories=['left_only', 'right_only', 'both'])

    test5 = merge(df3, df4, on=['col1', 'col2'],
                  how='outer', indicator=True)
    assert_frame_equal(test5, hand_coded_result)
    test5 = df3.merge(df4, on=['col1', 'col2'],
                      how='outer', indicator=True)
    assert_frame_equal(test5, hand_coded_result)
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
# TODO check_names on merge?
assert_frame_equal(result, expected, check_names=False)
class TestMergeMulti(tm.TestCase):
def setUp(self):
    """Build shared fixtures: a MultiIndexed frame and key data with NAs."""
    self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                    ['one', 'two', 'three']],
                            labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                    [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                            names=['first', 'second'])
    self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
                             columns=['j_one', 'j_two', 'j_three'])

    # a little relevant example with NAs
    # ('snap', 'one') does not exist in self.index -> unmatched key.
    key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
            'qux', 'snap']
    key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
            'three', 'one']

    data = np.random.randn(len(key1))
    self.data = DataFrame({'key1': key1, 'key2': key2,
                           'data': data})
def test_merge_on_multikey(self):
    """join on two key columns matches a manual get_indexer lookup."""
    joined = self.data.join(self.to_join, on=['key1', 'key2'])

    # Build the expected frame by hand via tuple-key indexer take.
    join_key = Index(lzip(self.data['key1'], self.data['key2']))
    indexer = self.to_join.index.get_indexer(join_key)
    ex_values = self.to_join.values.take(indexer, axis=0)
    ex_values[indexer == -1] = np.nan  # unmatched keys -> NaN rows
    expected = self.data.join(DataFrame(ex_values,
                                        columns=self.to_join.columns))

    # TODO: columns aren't in the same order yet
    assert_frame_equal(joined, expected.ix[:, joined.columns])

    left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
    right = expected.ix[:, joined.columns].sort_values(['key1', 'key2'],
                                                       kind='mergesort')
    assert_frame_equal(left, right)
def test_left_join_multi_index(self):
    """Left join on three key columns, with and without injected nulls."""
    icols = ['1st', '2nd', '3rd']

    def bind_cols(df):
        # Deterministic checksum of the three key columns; NaN maps to 0
        # via the a != a test so null keys still produce a value.
        iord = lambda a: 0 if a != a else ord(a)
        f = lambda ts: ts.map(iord) - ord('a')
        return (f(df['1st']) + f(df['3rd']) * 1e2 +
                df['2nd'].fillna(0) * 1e4)

    def run_asserts(left, right):
        # Check the join result both unsorted and sorted.
        for sort in [False, True]:
            res = left.join(right, on=icols, how='left', sort=sort)

            self.assertTrue(len(left) < len(res) + 1)
            self.assertFalse(res['4th'].isnull().any())
            self.assertFalse(res['5th'].isnull().any())

            # '4th' and '5th' were built as exact negatives of each other.
            tm.assert_series_equal(
                res['4th'], - res['5th'], check_names=False)
            result = bind_cols(res.iloc[:, :-2])
            tm.assert_series_equal(res['4th'], result, check_names=False)
            self.assertTrue(result.name is None)

            if sort:
                tm.assert_frame_equal(
                    res, res.sort_values(icols, kind='mergesort'))

            # join-on-columns must agree with an explicit column merge.
            out = merge(left, right.reset_index(), on=icols,
                        sort=sort, how='left')

            res.index = np.arange(len(res))
            tm.assert_frame_equal(out, res)

    lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
    left = DataFrame(np.random.choice(lc, (5000, 2)),
                     columns=['1st', '3rd'])
    left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))

    i = np.random.permutation(len(left))
    right = left.iloc[i].copy()

    left['4th'] = bind_cols(left)
    right['5th'] = - bind_cols(right)
    right.set_index(icols, inplace=True)

    run_asserts(left, right)

    # inject some nulls
    left.loc[1::23, '1st'] = np.nan
    left.loc[2::37, '2nd'] = np.nan
    left.loc[3::43, '3rd'] = np.nan
    left['4th'] = bind_cols(left)

    i = np.random.permutation(len(left))
    right = left.iloc[i, :-1]
    right['5th'] = - bind_cols(right)
    right.set_index(icols, inplace=True)

    run_asserts(left, right)
def test_merge_right_vs_left(self):
    """A left merge and the mirrored right merge must agree."""
    for sort in [False, True]:
        as_left = self.data.merge(self.to_join, left_on=['key1', 'key2'],
                                  right_index=True, how='left', sort=sort)

        as_right = self.to_join.merge(self.data, right_on=['key1', 'key2'],
                                      left_index=True, how='right',
                                      sort=sort)

        # Align column order before comparing the two results.
        assert_frame_equal(as_left, as_right.ix[:, as_left.columns])
def test_compress_group_combinations(self):
    """Smoke test: outer merge with huge key cardinality."""
    # ~ 40000000 possible unique groups
    key1 = np.tile(tm.rands_array(10, 10000), 2)
    key2 = key1[::-1]

    frame_a = DataFrame({'key1': key1, 'key2': key2,
                         'value1': np.random.randn(20000)})
    frame_b = DataFrame({'key1': key1[::2], 'key2': key2[::2],
                         'value2': np.random.randn(10000)})

    # just to hit the label compression code path
    merge(frame_a, frame_b, how='outer')
def test_left_join_index_preserve_order(self):
    """Left join on columns against a MultiIndex keeps left row order."""
    left = DataFrame({'k1': [0, 1, 2] * 8,
                      'k2': ['foo', 'bar'] * 12,
                      'v': np.array(np.arange(24), dtype=np.int64)})

    index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
    right = DataFrame({'v2': [5, 7]}, index=index)

    result = left.join(right, on=['k1', 'k2'])

    # Expected: left order intact, matches filled in, rest NaN.
    expected = left.copy()
    expected['v2'] = np.nan
    expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
    expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7

    tm.assert_frame_equal(result, expected)

    tm.assert_frame_equal(
        result.sort_values(['k1', 'k2'], kind='mergesort'),
        left.join(right, on=['k1', 'k2'], sort=True))

    # test join with multi dtypes blocks
    left = DataFrame({'k1': [0, 1, 2] * 8,
                      'k2': ['foo', 'bar'] * 12,
                      'k3': np.array([0, 1, 2] * 8, dtype=np.float32),
                      'v': np.array(np.arange(24), dtype=np.int32)})

    index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
    right = DataFrame({'v2': [5, 7]}, index=index)

    result = left.join(right, on=['k1', 'k2'])

    expected = left.copy()
    expected['v2'] = np.nan
    expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
    expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7

    tm.assert_frame_equal(result, expected)

    tm.assert_frame_equal(
        result.sort_values(['k1', 'k2'], kind='mergesort'),
        left.join(right, on=['k1', 'k2'], sort=True))

    # do a right join for an extra test
    joined = merge(right, left, left_index=True,
                   right_on=['k1', 'k2'], how='right')
    tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
    """Left join on 3 columns vs a 3-level MultiIndex with dup matches."""
    left = DataFrame([
        ['X', 'Y', 'C', 'a'],
        ['W', 'Y', 'C', 'e'],
        ['V', 'Q', 'A', 'h'],
        ['V', 'R', 'D', 'i'],
        ['X', 'Y', 'D', 'b'],
        ['X', 'Y', 'A', 'c'],
        ['W', 'Q', 'B', 'f'],
        ['W', 'R', 'C', 'g'],
        ['V', 'Y', 'C', 'j'],
        ['X', 'Y', 'B', 'd']],
        columns=['cola', 'colb', 'colc', 'tag'],
        index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])

    right = DataFrame([
        ['W', 'R', 'C', 0],
        ['W', 'Q', 'B', 3],
        ['W', 'Q', 'B', 8],
        ['X', 'Y', 'A', 1],
        ['X', 'Y', 'A', 4],
        ['X', 'Y', 'B', 5],
        ['X', 'Y', 'C', 6],
        ['X', 'Y', 'C', 9],
        ['X', 'Q', 'C', -6],
        ['X', 'R', 'C', -9],
        ['V', 'Y', 'C', 7],
        ['V', 'R', 'D', 2],
        ['V', 'R', 'D', -1],
        ['V', 'Q', 'A', -3]],
        columns=['col1', 'col2', 'col3', 'val'])

    right.set_index(['col1', 'col2', 'col3'], inplace=True)

    result = left.join(right, on=['cola', 'colb', 'colc'], how='left')

    # Duplicated right keys repeat the matching left row (index dups too).
    expected = DataFrame([
        ['X', 'Y', 'C', 'a', 6],
        ['X', 'Y', 'C', 'a', 9],
        ['W', 'Y', 'C', 'e', nan],
        ['V', 'Q', 'A', 'h', -3],
        ['V', 'R', 'D', 'i', 2],
        ['V', 'R', 'D', 'i', -1],
        ['X', 'Y', 'D', 'b', nan],
        ['X', 'Y', 'A', 'c', 1],
        ['X', 'Y', 'A', 'c', 4],
        ['W', 'Q', 'B', 'f', 3],
        ['W', 'Q', 'B', 'f', 8],
        ['W', 'R', 'C', 'g', 0],
        ['V', 'Y', 'C', 'j', 7],
        ['X', 'Y', 'B', 'd', 5]],
        columns=['cola', 'colb', 'colc', 'tag', 'val'],
        index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])

    tm.assert_frame_equal(result, expected)

    result = left.join(right, on=['cola', 'colb', 'colc'],
                       how='left', sort=True)

    tm.assert_frame_equal(
        result,
        expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))

    # GH7331 - maintain left frame order in left merge
    right.reset_index(inplace=True)
    right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
    result = merge(left, right, how='left', on=left.columns[:-1].tolist())
    expected.index = np.arange(len(expected))
    tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
    """Left join where a single left key matches several right rows."""
    left = DataFrame([
        ['c', 0],
        ['b', 1],
        ['a', 2],
        ['b', 3]],
        columns=['tag', 'val'],
        index=[2, 0, 1, 3])

    right = DataFrame([
        ['a', 'v'],
        ['c', 'w'],
        ['c', 'x'],
        ['d', 'y'],
        ['a', 'z'],
        ['c', 'r'],
        ['e', 'q'],
        ['c', 's']],
        columns=['tag', 'char'])

    right.set_index('tag', inplace=True)

    result = left.join(right, on='tag', how='left')

    # Each left row is repeated per right match; 'b' has none -> NaN.
    expected = DataFrame([
        ['c', 0, 'w'],
        ['c', 0, 'x'],
        ['c', 0, 'r'],
        ['c', 0, 's'],
        ['b', 1, nan],
        ['a', 2, 'v'],
        ['a', 2, 'z'],
        ['b', 3, nan]],
        columns=['tag', 'val', 'char'],
        index=[2, 2, 2, 2, 0, 1, 1, 3])

    tm.assert_frame_equal(result, expected)

    result = left.join(right, on='tag', how='left', sort=True)
    tm.assert_frame_equal(
        result, expected.sort_values('tag', kind='mergesort'))

    # GH7331 - maintain left frame order in left merge
    result = merge(left, right.reset_index(), how='left', on='tag')
    expected.index = np.arange(len(expected))
    tm.assert_frame_equal(result, expected)
def test_join_multi_dtypes(self):
    """Multi-key joins across int/float key dtype combinations."""
    # test with multi dtypes in the join index
    def _test(dtype1, dtype2):
        left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
                          'k2': ['foo', 'bar'] * 12,
                          'v': np.array(np.arange(24), dtype=np.int64)})

        index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
        right = DataFrame(
            {'v2': np.array([5, 7], dtype=dtype2)}, index=index)

        result = left.join(right, on=['k1', 'k2'])

        expected = left.copy()

        # Integer v2 upcasts to float64 because unmatched rows get NaN.
        if dtype2.kind == 'i':
            dtype2 = np.dtype('float64')
        expected['v2'] = np.array(np.nan, dtype=dtype2)
        expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
        expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7

        tm.assert_frame_equal(result, expected)

        result = left.join(right, on=['k1', 'k2'], sort=True)
        expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)
        tm.assert_frame_equal(result, expected)

    for d1 in [np.int64, np.int32, np.int16, np.int8, np.uint8]:
        for d2 in [np.int64, np.float64, np.float32, np.float16]:
            _test(np.dtype(d1), np.dtype(d2))
def test_left_merge_na_buglet(self):
    """Left merge with NaN keys on the right keeps left order and values."""
    left = DataFrame({'id': list('abcde'), 'v1': randn(5),
                      'v2': randn(5), 'dummy': list('abcde'),
                      'v3': randn(5)},
                     columns=['id', 'v1', 'v2', 'dummy', 'v3'])
    right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
                       'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})

    result = merge(left, right, on='id', how='left')

    # Equivalent construction: drop the key and join the rest by position.
    expected = left.join(right.drop(['id'], axis=1))
    tm.assert_frame_equal(result, expected)
def test_merge_na_keys(self):
    """Outer merge with NaN in a data column matches a sentinel round-trip."""
    data = [[1950, "A", 1.5],
            [1950, "B", 1.5],
            [1955, "B", 1.5],
            [1960, "B", np.nan],
            [1970, "B", 4.],
            [1950, "C", 4.],
            [1960, "C", np.nan],
            [1965, "C", 3.],
            [1970, "C", 4.]]

    frame = DataFrame(data, columns=["year", "panel", "data"])

    other_data = [[1960, 'A', np.nan],
                  [1970, 'A', np.nan],
                  [1955, 'A', np.nan],
                  [1965, 'A', np.nan],
                  [1965, 'B', np.nan],
                  [1955, 'C', np.nan]]
    other = DataFrame(other_data, columns=['year', 'panel', 'data'])

    result = frame.merge(other, how='outer')

    # Sentinel trick: fill NaNs, merge, then restore them.
    expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
    expected = expected.replace(-999, np.nan)

    tm.assert_frame_equal(result, expected)
@slow
def test_int64_overflow_issues(self):
    """Merges whose combined key space can overflow int64 (#2690, GH9092)."""
    from itertools import product
    from collections import defaultdict
    from pandas.core.groupby import _int64_overflow_possible

    # #2690, combinatorial explosion
    df1 = DataFrame(np.random.randn(1000, 7),
                    columns=list('ABCDEF') + ['G1'])
    df2 = DataFrame(np.random.randn(1000, 7),
                    columns=list('ABCDEF') + ['G2'])

    # it works!
    result = merge(df1, df2, how='outer')
    self.assertTrue(len(result) == 2000)

    low, high, n = -1 << 10, 1 << 10, 1 << 20
    left = DataFrame(np.random.randint(low, high, (n, 7)),
                     columns=list('ABCDEFG'))
    left['left'] = left.sum(axis=1)

    # one-2-one match
    i = np.random.permutation(len(left))
    right = left.iloc[i].copy()
    right.columns = right.columns[:-1].tolist() + ['right']
    right.index = np.arange(len(right))
    right['right'] *= -1

    out = merge(left, right, how='outer')
    self.assertEqual(len(out), len(left))
    # 'right' was built as the negated 'left', so they must cancel.
    assert_series_equal(out['left'], - out['right'], check_names=False)
    result = out.iloc[:, :-2].sum(axis=1)
    assert_series_equal(out['left'], result, check_names=False)
    self.assertTrue(result.name is None)

    out.sort_values(out.columns.tolist(), inplace=True)
    out.index = np.arange(len(out))
    for how in ['left', 'right', 'outer', 'inner']:
        assert_frame_equal(out, merge(left, right, how=how, sort=True))

    # check that left merge w/ sort=False maintains left frame order
    out = merge(left, right, how='left', sort=False)
    assert_frame_equal(left, out[left.columns.tolist()])

    out = merge(right, left, how='left', sort=False)
    assert_frame_equal(right, out[right.columns.tolist()])

    # one-2-many/none match
    n = 1 << 11
    left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
                     columns=list('ABCDEFG'))

    # confirm that this is checking what it is supposed to check
    shape = left.apply(Series.nunique).values
    self.assertTrue(_int64_overflow_possible(shape))

    # add duplicates to left frame
    left = concat([left, left], ignore_index=True)

    right = DataFrame(np.random.randint(low, high, (n // 2, 7))
                      .astype('int64'),
                      columns=list('ABCDEFG'))

    # add duplicates & overlap with left to the right frame
    i = np.random.choice(len(left), n)
    right = concat([right, right, left.iloc[i]], ignore_index=True)

    left['left'] = np.random.randn(len(left))
    right['right'] = np.random.randn(len(right))

    # shuffle left & right frames
    i = np.random.permutation(len(left))
    left = left.iloc[i].copy()
    left.index = np.arange(len(left))

    i = np.random.permutation(len(right))
    right = right.iloc[i].copy()
    right.index = np.arange(len(right))

    # manually compute outer merge
    ldict, rdict = defaultdict(list), defaultdict(list)

    for idx, row in left.set_index(list('ABCDEFG')).iterrows():
        ldict[idx].append(row['left'])

    for idx, row in right.set_index(list('ABCDEFG')).iterrows():
        rdict[idx].append(row['right'])

    vals = []
    for k, lval in ldict.items():
        rval = rdict.get(k, [np.nan])
        for lv, rv in product(lval, rval):
            vals.append(k + tuple([lv, rv]))

    for k, rval in rdict.items():
        if k not in ldict:
            for rv in rval:
                vals.append(k + tuple([np.nan, rv]))

    def align(df):
        # Canonical row order so frames built different ways compare equal.
        df = df.sort_values(df.columns.tolist())
        df.index = np.arange(len(df))
        return df

    def verify_order(df):
        # sort=True must already yield rows ordered by the key columns.
        kcols = list('ABCDEFG')
        assert_frame_equal(df[kcols].copy(),
                           df[kcols].sort_values(kcols, kind='mergesort'))

    out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
    out = align(out)

    # Row masks deriving each join type from the manual outer merge.
    jmask = {'left': out['left'].notnull(),
             'right': out['right'].notnull(),
             'inner': out['left'].notnull() & out['right'].notnull(),
             'outer': np.ones(len(out), dtype='bool')}

    for how in 'left', 'right', 'outer', 'inner':
        mask = jmask[how]
        frame = align(out[mask].copy())
        self.assertTrue(mask.all() ^ mask.any() or how == 'outer')

        for sort in [False, True]:
            res = merge(left, right, how=how, sort=sort)
            if sort:
                verify_order(res)

            # as in GH9092 dtypes break with outer/right join
            assert_frame_equal(frame, align(res),
                               check_dtype=how not in ('right', 'outer'))
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = (
DataFrame(
dict(household_id=[1, 2, 3],
male=[0, 1, 0],
wealth=[196087.3, 316478.7, 294750]),
columns=['household_id', 'male', 'wealth'])
.set_index('household_id'))
portfolio = (
DataFrame(
dict(household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "nl0000289965",
np.nan],
name=["ABN Amro", "Robeco", "Royal Dutch Shell",
"Royal Dutch Shell",
"AAB Eastern Europe Equity Fund",
"Postbank BioTech Fonds", np.nan],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
columns=['household_id', 'asset_id', 'name', 'share'])
.set_index(['household_id', 'asset_id']))
result = household.join(portfolio, how='inner')
expected = (
DataFrame(
dict(male=[0, 1, 1, 0, 0, 0],
wealth=[196087.3, 316478.7, 316478.7,
294750.0, 294750.0, 294750.0],
name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',
'Royal Dutch Shell',
'AAB Eastern Europe Equity Fund',
'Postbank BioTech Fonds'],
share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],
household_id=[1, 2, 2, 3, 3, 3],
asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',
'gb00b03mlx29', 'lu0197800237',
'nl0000289965']))
.set_index(['household_id', 'asset_id'])
.reindex(columns=['male', 'wealth', 'name', 'share']))
assert_frame_equal(result, expected)
assert_frame_equal(result, expected)
# equivalency
result2 = (merge(household.reset_index(), portfolio.reset_index(),
on=['household_id'], how='inner')
.set_index(['household_id', 'asset_id']))
assert_frame_equal(result2, expected)
result = household.join(portfolio, how='outer')
expected = (concat([
expected,
(DataFrame(
dict(share=[1.00]),
index=MultiIndex.from_tuples(
[(4, np.nan)],
names=['household_id', 'asset_id'])))
], axis=0).reindex(columns=expected.columns))
assert_frame_equal(result, expected)
# invalid cases
household.index.name = 'foo'
def f():
household.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
portfolio2 = portfolio.copy()
portfolio2.index.set_names(['household_id', 'foo'])
def f():
portfolio2.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
    def test_join_multi_levels2(self):
        """Joins/merges spanning partially-overlapping MultiIndex levels (GH 6360)."""
        # some more advanced merges
        # GH6360
        household = (
            DataFrame(
                dict(household_id=[1, 2, 2, 3, 3, 3, 4],
                     asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29",
                               "gb00b03mlx29", "lu0197800237", "nl0000289965",
                               np.nan],
                     share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
                columns=['household_id', 'asset_id', 'share'])
            .set_index(['household_id', 'asset_id']))
        log_return = DataFrame(dict(
            asset_id=["gb00b03mlx29", "gb00b03mlx29",
                      "gb00b03mlx29", "lu0197800237", "lu0197800237"],
            t=[233, 234, 235, 180, 181],
            log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]
        )).set_index(["asset_id", "t"])
        expected = (
            DataFrame(dict(
                household_id=[2, 2, 2, 3, 3, 3, 3, 3],
                asset_id=["gb00b03mlx29", "gb00b03mlx29",
                          "gb00b03mlx29", "gb00b03mlx29",
                          "gb00b03mlx29", "gb00b03mlx29",
                          "lu0197800237", "lu0197800237"],
                t=[233, 234, 235, 233, 234, 235, 180, 181],
                share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
                log_return=[.09604978, -.06524096, .03532373,
                            .09604978, -.06524096, .03532373,
                            .03025441, .036997]
            ))
            .set_index(["household_id", "asset_id", "t"])
            .reindex(columns=['share', 'log_return']))

        # joining on a level subset is expected to be unimplemented
        def f():
            household.join(log_return, how='inner')
        self.assertRaises(NotImplementedError, f)
        # this is the equivalency
        result = (merge(household.reset_index(), log_return.reset_index(),
                        on=['asset_id'], how='inner')
                  .set_index(['household_id', 'asset_id', 't']))
        assert_frame_equal(result, expected)
        expected = (
            DataFrame(dict(
                household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
                asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
                          "gb00b03mlx29", "gb00b03mlx29",
                          "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29",
                          "lu0197800237", "lu0197800237",
                          "nl0000289965", None],
                t=[None, None, 233, 234, 235, 233, 234,
                   235, 180, 181, None, None],
                share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,
                       0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
                log_return=[None, None, .09604978, -.06524096, .03532373,
                            .09604978, -.06524096, .03532373,
                            .03025441, .036997, None, None]
            ))
            .set_index(["household_id", "asset_id", "t"]))

        def f():
            household.join(log_return, how='outer')
        self.assertRaises(NotImplementedError, f)
def _check_join(left, right, result, join_col, how='left',
                lsuffix='_x', rsuffix='_y'):
    """Validate a join *result* group-by-group against *left* and *right*.

    For every key combination in *result*, the rows contributed by each side
    must match that side's rows for the key, or be all-NA when the key is
    absent from that side (which is only legal for outer-style joins).
    """
    # some smoke tests
    for c in join_col:
        assert(result[c].notnull().all())
    left_grouped = left.groupby(join_col)
    right_grouped = right.groupby(join_col)
    for group_key, group in result.groupby(join_col):
        # split the result's columns back into left-side and right-side parts
        l_joined = _restrict_to_columns(group, left.columns, lsuffix)
        r_joined = _restrict_to_columns(group, right.columns, rsuffix)
        try:
            lgroup = left_grouped.get_group(group_key)
        except KeyError:
            # key absent from left: only valid for right/outer joins,
            # and then the left-side columns must be entirely NA
            if how in ('left', 'inner'):
                raise AssertionError('key %s should not have been in the join'
                                     % str(group_key))
            _assert_all_na(l_joined, left.columns, join_col)
        else:
            _assert_same_contents(l_joined, lgroup)
        try:
            rgroup = right_grouped.get_group(group_key)
        except KeyError:
            # mirror-image check for the right side
            if how in ('right', 'inner'):
                raise AssertionError('key %s should not have been in the join'
                                     % str(group_key))
            _assert_all_na(r_joined, right.columns, join_col)
        else:
            _assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.ix[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.ix[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
    """Reference join: align both frames on the joined index, then copy b's columns."""
    join_index = a.index.join(b.index, how=how)
    combined = a.reindex(join_index)
    b_aligned = b.reindex(join_index)
    for col, s in compat.iteritems(b_aligned):
        combined[col] = s
    # preserve column order: all of a's columns followed by all of b's
    return combined.reindex(columns=a.columns.append(b.columns))
class TestConcatenate(tm.TestCase):
_multiprocess_can_split_ = True
    def setUp(self):
        """Build shared fixtures: a float frame and a mixed-dtype copy of it."""
        self.frame = DataFrame(tm.getSeriesData())
        self.mixed_frame = self.frame.copy()
        self.mixed_frame['foo'] = 'bar'
    def test_append(self):
        """Append frames along the index: aligned, partial, mixed-dtype, empty, and Series cases."""
        begin_index = self.frame.index[:5]
        end_index = self.frame.index[5:]
        begin_frame = self.frame.reindex(begin_index)
        end_frame = self.frame.reindex(end_index)
        appended = begin_frame.append(end_frame)
        assert_almost_equal(appended['A'], self.frame['A'])
        del end_frame['A']
        partial_appended = begin_frame.append(end_frame)
        self.assertIn('A', partial_appended)
        partial_appended = end_frame.append(begin_frame)
        self.assertIn('A', partial_appended)
        # mixed type handling
        appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
        assert_frame_equal(appended, self.mixed_frame)
        # what to test here
        mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
        mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
        # all equal except 'foo' column
        assert_frame_equal(
            mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
            mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
        # append empty
        empty = DataFrame({})
        appended = self.frame.append(empty)
        assert_frame_equal(self.frame, appended)
        self.assertIsNot(appended, self.frame)
        appended = empty.append(self.frame)
        assert_frame_equal(self.frame, appended)
        self.assertIsNot(appended, self.frame)
        # overlap
        self.assertRaises(ValueError, self.frame.append, self.frame,
                          verify_integrity=True)
        # new columns
        # GH 6129
        df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
        row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
        expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {
            'x': 3, 'y': 4, 'z': 6}, 'c': {'z': 7}})
        result = df.append(row)
        assert_frame_equal(result, expected)
def test_append_length0_frame(self):
df = DataFrame(columns=['A', 'B', 'C'])
df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
df5 = df.append(df3)
expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
arr2[:] = [(3, 4., 'foo'),
(5, 6., "bar"),
(7., 8., 'baz')]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
assert_frame_equal(result, expected)
def test_append_different_columns(self):
df = DataFrame({'bools': np.random.randn(10) > 0,
'ints': np.random.randint(0, 10, 10),
'floats': np.random.randn(10),
'strings': ['foo', 'bar'] * 5})
a = df[:5].ix[:, ['bools', 'ints', 'floats']]
b = df[5:].ix[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
self.assertTrue(isnull(appended['strings'][0:4]).all())
self.assertTrue(isnull(appended['bools'][5:]).all())
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
self.frame[10:15], self.frame[15:]]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, self.frame)
chunks[-1] = chunks[-1].copy()
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)
self.assertTrue((result['foo'][15:] == 'bar').all())
self.assertTrue(result['foo'][:15].isnull().all())
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
df1 = df1.set_index(['A'])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
columns=['A', 'B', 'C'])
df2 = df2.set_index(['A'])
result = df1.append(df2)
self.assertEqual(result.index.name, 'A')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.ix[:, ['A', 'B']]
df2 = df.ix[:, ['C', 'D']]
df3 = df.ix[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_append_missing_column_proper_upcast(self):
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
df2 = DataFrame({'B': np.array([True, False, True, False],
dtype=bool)})
appended = df1.append(df2, ignore_index=True)
self.assertEqual(appended['A'].dtype, 'f8')
self.assertEqual(appended['B'].dtype, 'O')
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: 'foo'}, index=range(4))
# these are actual copies
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._data.blocks:
self.assertIsNone(b.values.base)
# these are the same
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertTrue(
b.values.base is df._data.blocks[0].values.base)
elif b.is_integer:
self.assertTrue(
b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
# float block was consolidated
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertIsNone(b.values.base)
elif b.is_integer:
self.assertTrue(
b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 0, 1, 2, 3]])
expected = DataFrame(np.r_[df.values, df2.values],
index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values],
index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values],
columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values],
columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]
level = ['three', 'two', 'one', 'zero']
result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
levels=[level],
names=['group_key'])
self.assert_numpy_array_equal(result.columns.levels[0], level)
self.assertEqual(result.columns.names[0], 'group_key')
def test_concat_dataframe_keys_bug(self):
t1 = DataFrame({
'value': Series([1, 2, 3], index=Index(['a', 'b', 'c'],
name='id'))})
t2 = DataFrame({
'value': Series([7, 8], index=Index(['a', 'b'], name='id'))})
# it works
result = concat([t1, t2], axis=1, keys=['t1', 't2'])
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name='foo')
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame({'foo': [1, 2], 0: [1, 2], 1: [
4, 5]}, columns=['foo', 0, 1])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=[
'red', 'blue', 'yellow'])
expected = DataFrame({'red': [1, 2], 'blue': [1, 2], 'yellow': [
4, 5]}, columns=['red', 'blue', 'yellow'])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
'baz': DataFrame(np.random.randn(4, 3)),
'qux': DataFrame(np.random.randn(4, 3))}
sorted_keys = sorted(frames)
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
axis=1)
tm.assert_frame_equal(result, expected)
keys = ['baz', 'foo', 'bar']
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
frame1 = DataFrame({"test1": ["a", "b", "c"],
"test2": [1, 2, 3],
"test3": [4.5, 3.2, 1.2]})
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True)
nan = np.nan
expected = DataFrame([[nan, nan, nan, 4.3],
['a', 1, 4.5, 5.2],
['b', 2, 3.2, 2.2],
['c', 3, 1.2, nan]],
index=Index(["q", "x", "y", "z"]))
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
result = concat([frame, frame], keys=[0, 1], names=['iteration'])
self.assertEqual(result.index.names, ('iteration',) + index.names)
tm.assert_frame_equal(result.ix[0], frame)
tm.assert_frame_equal(result.ix[1], frame)
self.assertEqual(result.index.nlevels, 3)
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame({'dt': [datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3)],
'b': ['A', 'B', 'C'],
'c': [1, 2, 3], 'd': [4, 5, 6]})
df['dt'] = df['dt'].apply(lambda d: Timestamp(d, tz='US/Pacific'))
df = df.set_index(['dt', 'b'])
exp_idx1 = DatetimeIndex(['2014-01-01', '2014-01-02',
'2014-01-03'] * 2,
tz='US/Pacific', name='dt')
exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2},
index=exp_idx, columns=['c', 'd'])
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [['foo', 'baz'], ['one', 'two']]
names = ['first', 'second']
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels,
names=names)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(levels=levels + [[0]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 0, 0]],
names=names + [None])
expected.index = exp_index
assert_frame_equal(result, expected)
# no names
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels)
self.assertEqual(result.index.names, (None,) * 3)
# no levels
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second') + (None,))
self.assert_numpy_array_equal(result.index.levels[0], ['baz', 'foo'])
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
self.assertRaises(ValueError, concat, [df, df],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
self.assertRaises(ValueError, concat, [df, df2],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
a = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_a'))
b = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_b'))
result = concat([a, b], keys=['key0', 'key1'],
names=['lvl0', 'lvl1'])
exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
names = list(exp.index.names)
names[1] = 'lvl1'
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
self.assertEqual(result.index.names, exp.index.names)
def test_crossed_dtypes_weird_corner(self):
columns = ['A', 'B', 'C', 'D']
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
'B': np.array([1, 2, 3, 4], dtype='i8'),
'C': np.array([1, 2, 3, 4], dtype='f8'),
'D': np.array([1, 2, 3, 4], dtype='i8')},
columns=columns)
df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
'B': np.array([1, 2, 3, 4], dtype='f8'),
'C': np.array([1, 2, 3, 4], dtype='i8'),
'D': np.array([1, 2, 3, 4], dtype='f8')},
columns=columns)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
columns=columns)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
result = concat(
[df, df2], keys=['one', 'two'], names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second'))
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(np.random.randint(0, 10, size=40).reshape(
10, 4), columns=['A', 'A', 'C', 'C'])
result = concat([df, df], axis=1)
assert_frame_equal(result.iloc[:, :4], df)
assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
assert_frame_equal(result.iloc[:10], df)
assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
result = concat([df, df], axis=1)
assert_frame_equal(result.iloc[:, :6], df)
assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
assert_frame_equal(result.iloc[:10], df)
assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
assert_frame_equal(result, expected)
def test_with_mixed_tuples(self):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({u'A': 'foo', (u'B', 1): 'bar'}, index=range(2))
df2 = DataFrame({u'B': 'foo', (u'B', 1): 'bar'}, index=range(2))
# it works
concat([df1, df2])
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
def test_handle_empty_objects(self):
df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
baz = df[:5].copy()
baz['foo'] = 'bar'
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
expected.loc[0:4, 'foo'] = 'bar'
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(dict(A=range(10000)), index=date_range(
'20130101', periods=10000, freq='s'))
empty = DataFrame()
result = concat([df, empty], axis=1)
assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
assert_frame_equal(result, df)
result = concat([df, empty])
assert_frame_equal(result, df)
result = concat([empty, df])
assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range('01-Jan-2013', periods=10, freq='H')
arr = np.arange(10, dtype='int64')
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(np.repeat(arr, 2).reshape(-1, 2),
index=index, columns=[0, 0])
result = concat([df, df], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 2).reshape(-1, 2),
index=index, columns=[0, 1])
result = concat([s1, s2], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=[0, 1, 2])
result = concat([s1, s2, s1], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 5).reshape(-1, 5),
index=index, columns=[0, 0, 1, 2, 3])
result = concat([s1, df, s2, s2, s1], axis=1)
assert_frame_equal(result, expected)
# with names
s1.name = 'foo'
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=['foo', 0, 0])
result = concat([s1, df, s2], axis=1)
assert_frame_equal(result, expected)
s2.name = 'bar'
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=['foo', 0, 'bar'])
result = concat([s1, df, s2], axis=1)
assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=[0, 1, 2])
result = concat([s1, df, s2], axis=1, ignore_index=True)
assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1),
index=index.tolist() * 3, columns=[0])
result = concat([s1, df, s2])
assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
assert_frame_equal(result, expected)
# invalid concatente of mixed dims
panel = tm.makePanel()
self.assertRaises(ValueError, lambda: concat([panel, s1], axis=1))
def test_panel_join(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[:2, :10, :3]
p2 = panel.ix[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.ix[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.ix[['ItemB', 'ItemC']]
# Expected index is
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.ix[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(p.iteritems())
joined = panels[0].join(panels[1:], how='inner')
expected = Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='right')
def test_panel_concat_other_axes(self):
panel = tm.makePanel()
p1 = panel.ix[:, :5, :]
p2 = panel.ix[:, 5:, :]
result = concat([p1, p2], axis=1)
tm.assert_panel_equal(result, panel)
p1 = panel.ix[:, :, :2]
p2 = panel.ix[:, :, 2:]
result = concat([p1, p2], axis=2)
tm.assert_panel_equal(result, panel)
# if things are a bit misbehaved
p1 = panel.ix[:2, :, :2]
p2 = panel.ix[:, :, 2:]
p1['ItemC'] = 'baz'
result = concat([p1, p2], axis=2)
expected = panel.copy()
expected['ItemC'] = expected['ItemC'].astype('O')
expected.ix['ItemC', :, :2] = 'baz'
tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
# #2257
def make_panel():
index = 5
cols = 3
def df():
return DataFrame(np.random.randn(index, cols),
index=["I%s" % i for i in range(index)],
columns=["C%s" % i for i in range(cols)])
return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))
panel1 = make_panel()
panel2 = make_panel()
panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
for x in panel2.major_axis]),
axis=1)
panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
# it works!
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
p4d = tm.makePanel4D()
p1 = p4d.ix[:, :, :5, :]
p2 = p4d.ix[:, :, 5:, :]
result = concat([p1, p2], axis=2)
tm.assert_panel4d_equal(result, p4d)
p1 = p4d.ix[:, :, :, :2]
p2 = p4d.ix[:, :, :, 2:]
result = concat([p1, p2], axis=3)
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
p1 = p4d.ix[:, :2, :, :2]
p2 = p4d.ix[:, :, :, 2:]
p1['L5'] = 'baz'
result = concat([p1, p2], axis=3)
p2['L5'] = np.nan
expected = concat([p1, p2], axis=3)
expected = expected.ix[result.labels]
tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = 'foo'
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
self.assertEqual(result.name, ts.name)
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
labels=exp_labels)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
assert_frame_equal(result, expected)
result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
expected = DataFrame(pieces, index=['A', 'B', 'C']).T
assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name='A')
s2 = Series(randn(5), name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
self.assertTrue(np.array_equal(
result.columns, Index(['A', 0], dtype='object')))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=['foo'])
expected = concat([df, df], keys=['foo', 'bar'])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
self.assertRaises(ValueError, concat, [None, None])
def test_concat_datetime64_block(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=10)
df = DataFrame({'time': rng})
result = concat([df, df])
self.assertTrue((result.iloc[:10]['time'] == rng).all())
self.assertTrue((result.iloc[10:]['time'] == rng).all())
    def test_concat_timedelta64_block(self):
        from pandas import to_timedelta

        rng = to_timedelta(np.arange(10), unit='s')

        df = DataFrame({'time': rng})
        result = concat([df, df])
        # both halves of the stacked frame keep the timedelta64 values intact
        self.assertTrue((result.iloc[:10]['time'] == rng).all())
        self.assertTrue((result.iloc[10:]['time'] == rng).all())
    def test_concat_keys_with_none(self):
        # #1649: None values keyed in a dict (or paired with keys=) are
        # dropped before concatenation, along with their keys.
        df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])

        result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
        expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
        tm.assert_frame_equal(result, expected)

        result = concat([None, df0, df0[:2], df0[:1], df0],
                        keys=['a', 'b', 'c', 'd', 'e'])
        expected = concat([df0, df0[:2], df0[:1], df0],
                          keys=['b', 'c', 'd', 'e'])
        tm.assert_frame_equal(result, expected)
    def test_concat_bug_1719(self):
        ts1 = tm.makeTimeSeries()
        ts2 = tm.makeTimeSeries()[::2]

        # to join with union
        # these two are of different length!
        left = concat([ts1, ts2], join='outer', axis=1)
        right = concat([ts2, ts1], join='outer', axis=1)

        # an outer join must be symmetric in the resulting length
        self.assertEqual(len(left), len(right))
    def test_concat_bug_2972(self):
        # duplicate Series names must survive an axis=1 concat
        ts0 = Series(np.zeros(5))
        ts1 = Series(np.ones(5))
        ts0.name = ts1.name = 'same name'
        result = concat([ts0, ts1], axis=1)

        expected = DataFrame({0: ts0, 1: ts1})
        expected.columns = ['same name', 'same name']
        assert_frame_equal(result, expected)
    def test_concat_bug_3602(self):
        # GH 3602, duplicate columns
        df1 = DataFrame({'firmNo': [0, 0, 0, 0], 'stringvar': [
            'rrr', 'rrr', 'rrr', 'rrr'], 'prc': [6, 6, 6, 6]})
        df2 = DataFrame({'misc': [1, 2, 3, 4], 'prc': [
            6, 6, 6, 6], 'C': [9, 10, 11, 12]})
        expected = DataFrame([[0, 6, 'rrr', 9, 1, 6],
                              [0, 6, 'rrr', 10, 2, 6],
                              [0, 6, 'rrr', 11, 3, 6],
                              [0, 6, 'rrr', 12, 4, 6]])
        # the duplicated 'prc' label appears once from each input frame
        expected.columns = ['firmNo', 'prc', 'stringvar', 'C', 'misc', 'prc']

        result = concat([df1, df2], axis=1)
        assert_frame_equal(result, expected)
    def test_concat_series_axis1_same_names_ignore_index(self):
        dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
        s1 = Series(randn(len(dates)), index=dates, name='value')
        s2 = Series(randn(len(dates)), index=dates, name='value')

        result = concat([s1, s2], axis=1, ignore_index=True)
        # ignore_index discards the duplicate names for positional labels
        self.assertTrue(np.array_equal(result.columns, [0, 1]))
    def test_concat_iterables(self):
        from collections import deque, Iterable

        # GH8645 check concat works with tuples, list, generators, and weird
        # stuff like deque and custom iterables
        df1 = DataFrame([1, 2, 3])
        df2 = DataFrame([4, 5, 6])
        expected = DataFrame([1, 2, 3, 4, 5, 6])
        assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
        assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
        assert_frame_equal(concat((df for df in (df1, df2)),
                                  ignore_index=True), expected)
        assert_frame_equal(
            concat(deque((df1, df2)), ignore_index=True), expected)

        class CustomIterator1(object):
            # sequence-protocol iterable: __len__ + __getitem__ only

            def __len__(self):
                return 2

            def __getitem__(self, index):
                try:
                    return {0: df1, 1: df2}[index]
                except KeyError:
                    raise IndexError
        assert_frame_equal(pd.concat(CustomIterator1(),
                                     ignore_index=True), expected)

        class CustomIterator2(Iterable):
            # iterator-protocol iterable: __iter__ only

            def __iter__(self):
                yield df1
                yield df2
        assert_frame_equal(pd.concat(CustomIterator2(),
                                     ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = mkdf(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
self.assertRaises(TypeError, lambda x: concat([df1, obj]))
    def test_concat_invalid_first_argument(self):
        df1 = mkdf(10, 2)
        df2 = mkdf(10, 2)
        # concat takes one collection of objects, not two positional frames
        self.assertRaises(TypeError, concat, df1, df2)

        # generator ok though
        concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))

        # text reader ok
        # GH6583
        data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
        reader = read_csv(StringIO(data), chunksize=1)
        result = concat(reader, ignore_index=True)
        expected = read_csv(StringIO(data))
        assert_frame_equal(result, expected)
class TestOrderedMerge(tm.TestCase):
    """Tests for ordered_merge: an order-preserving outer merge with
    optional group-wise fill."""

    def setUp(self):
        self.left = DataFrame({'key': ['a', 'c', 'e'],
                               'lvalue': [1, 2., 3]})

        self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
                                'rvalue': [1, 2, 3., 4]})

    # GH #813

    def test_basic(self):
        # keys are interleaved in sorted order; unmatched sides become NaN
        result = ordered_merge(self.left, self.right, on='key')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1, nan, 2, nan, 3, nan],
                              'rvalue': [nan, 1, 2, 3, nan, 4]})

        assert_frame_equal(result, expected)

    def test_ffill(self):
        # fill_method='ffill' forward-fills the gaps created by the merge
        result = ordered_merge(
            self.left, self.right, on='key', fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1., 1, 2, 2, 3, 3.],
                              'rvalue': [nan, 1, 2, 3, 3, 4]})
        assert_frame_equal(result, expected)

    def test_multigroup(self):
        # left_by/right_by repeats the merge once per group value
        left = concat([self.left, self.left], ignore_index=True)
        # right = concat([self.right, self.right], ignore_index=True)

        left['group'] = ['a'] * 3 + ['b'] * 3
        # right['group'] = ['a'] * 4 + ['b'] * 4

        result = ordered_merge(left, self.right, on='key', left_by='group',
                               fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
                              'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
                              'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
        expected['group'] = ['a'] * 6 + ['b'] * 6

        assert_frame_equal(result, expected.ix[:, result.columns])

        # swapping the sides (with right_by) must give the same frame
        result2 = ordered_merge(self.right, left, on='key', right_by='group',
                                fill_method='ffill')
        assert_frame_equal(result, result2.ix[:, result.columns])

        # the group column itself is never left NaN
        result = ordered_merge(left, self.right, on='key', left_by='group')
        self.assertTrue(result['group'].notnull().all())

    def test_merge_type(self):
        class NotADataFrame(DataFrame):
            # subclass whose _constructor keeps merge results in the subclass

            @property
            def _constructor(self):
                return NotADataFrame

        nad = NotADataFrame(self.left)
        result = nad.merge(self.right, on='key')

        tm.assertIsInstance(result, NotADataFrame)

    def test_empty_sequence_concat(self):
        # GH 9157: empty / all-None sequences raise with a helpful message
        empty_pat = "[Nn]o objects"
        none_pat = "objects.*None"
        test_cases = [
            ((), empty_pat),
            ([], empty_pat),
            ({}, empty_pat),
            ([None], none_pat),
            ([None, None], none_pat)
        ]
        for df_seq, pattern in test_cases:
            assertRaisesRegexp(ValueError, pattern, pd.concat, df_seq)

        # a single empty frame (or one mixed with None) is still accepted
        pd.concat([pd.DataFrame()])
        pd.concat([None, pd.DataFrame()])
        pd.concat([pd.DataFrame(), None])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
mortonjt/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """Return Scott's Rule bandwidth for *obj*, shrunk by the factor *fac*.

    *obj* is a gaussian_kde instance exposing ``n`` (sample count) and
    ``d`` (dimensionality).
    """
    scott_factor = np.power(obj.n, -1. / (obj.d + 4))
    return scott_factor * fac
# Bimodal sample: a broad component at -2 and a narrow one at +2.
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])

# Evaluation grid extends one unit past the sample range on each side.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)

# Four KDEs: two built-in bandwidth rules and two scaled-down variants.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))

# The true density is the size-weighted mixture of the two normals.
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)

ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")

ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
buntyke/GPy | setup.py | 8 | 4128 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from setuptools import setup, Extension
import numpy as np
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.

    Bug fix: the file handle was previously never closed (relied on GC);
    a context manager guarantees closure.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
def read_to_rst(fname):
    """Return the project README converted to reStructuredText.

    Converts README.md with pypandoc (also writing README.rst for PyPI)
    and returns the converted text. Falls back to the raw contents of
    *fname* when pypandoc is unavailable or conversion fails.

    Bug fix: the original returned None on the successful pypandoc path
    (it only wrote the file), so setup()'s long_description ended up None
    whenever pypandoc was installed.
    """
    try:
        import pypandoc
        rst = pypandoc.convert('README.md', 'rst')
        with open('README.rst', 'w') as f:
            f.write(rst)
        return rst
    except Exception:
        # pypandoc missing or conversion failed: serve the raw markdown.
        return read(fname)
# Read the package version from GPy/__version__.py without importing GPy
# itself (importing would require the C extensions to already be built).
version_dummy = {}
exec(read('GPy/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
#Mac OS X Clang doesn't support OpenMP at the current time.
#This detects if we are building on a Mac
def ismac():
    """Return True when building on Mac OS X (sys.platform is 'darwin')."""
    return sys.platform.startswith('darwin')
if ismac():
compile_flags = [ '-O3', ]
link_args = []
else:
compile_flags = [ '-fopenmp', '-O3', ]
link_args = ['-lgomp']
ext_mods = [Extension(name='GPy.kern._src.stationary_cython',
sources=['GPy/kern/_src/stationary_cython.c','GPy/kern/_src/stationary_utils.c'],
include_dirs=[np.get_include()],
extra_compile_args=compile_flags,
extra_link_args = link_args),
Extension(name='GPy.util.choleskies_cython',
sources=['GPy/util/choleskies_cython.c'],
include_dirs=[np.get_include()],
extra_link_args = link_args,
extra_compile_args=compile_flags),
Extension(name='GPy.util.linalg_cython',
sources=['GPy/util/linalg_cython.c'],
include_dirs=[np.get_include()],
extra_compile_args=compile_flags),
Extension(name='GPy.kern._src.coregionalize_cython',
sources=['GPy/kern/_src/coregionalize_cython.c'],
include_dirs=[np.get_include()],
extra_compile_args=compile_flags)]
setup(name = 'GPy',
version = __version__,
author = read('AUTHORS.txt'),
author_email = "gpy.authors@gmail.com",
description = ("The Gaussian Process Toolbox"),
license = "BSD 3-clause",
keywords = "machine-learning gaussian-processes kernels",
url = "http://sheffieldml.github.com/GPy/",
ext_modules = ext_mods,
packages = ["GPy.models",
"GPy.inference.optimization",
"GPy.inference.mcmc",
"GPy.inference",
"GPy.inference.latent_function_inference",
"GPy.likelihoods", "GPy.mappings",
"GPy.examples", "GPy.core.parameterization",
"GPy.core", "GPy.testing",
"GPy", "GPy.util", "GPy.kern",
"GPy.kern._src.psi_comp", "GPy.kern._src",
"GPy.plotting.matplot_dep.latent_space_visualizations.controllers",
"GPy.plotting.matplot_dep.latent_space_visualizations",
"GPy.plotting.matplot_dep", "GPy.plotting"],
package_dir={'GPy': 'GPy'},
package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
'util/data_resources.json',
'util/football_teams.json',
]},
include_package_data = True,
py_modules = ['GPy.__init__'],
test_suite = 'GPy.testing',
long_description=read_to_rst('README.md'),
install_requires=['numpy>=1.7', 'scipy>=0.16', 'six'],
extras_require = {'docs':['matplotlib >=1.3','Sphinx','IPython']},
classifiers=['License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence']
)
| mit |
huaxz1986/git_book | chapters/PreProcessing/standardize.py | 1 | 1797 | # -*- coding: utf-8 -*-
"""
数据预处理
~~~~~~~~~~~~~~~~
数据标准化
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
from sklearn.preprocessing import MinMaxScaler,MaxAbsScaler,StandardScaler
def test_MinMaxScaler():
    '''
    Demonstrate the usage of MinMaxScaler (rescales each feature column
    linearly into the requested feature_range).

    :return: None
    '''
    X=[ [1,5,1,2,10],
        [2,6,3,2,7],
        [3,7,5,6,4,],
        [4,8,7,8,1] ]
    print("before transform:",X)
    scaler=MinMaxScaler(feature_range=(0,2))
    scaler.fit(X)
    print("min_ is :",scaler.min_)
    print("scale_ is :",scaler.scale_)
    print("data_max_ is :",scaler.data_max_)
    print("data_min_ is :",scaler.data_min_)
    print("data_range_ is :",scaler.data_range_)
    print("after transform:",scaler.transform(X))
def test_MaxAbsScaler():
    '''
    Demonstrate the usage of MaxAbsScaler (divides each feature column by
    its maximum absolute value).

    :return: None
    '''
    X=[ [1,5,1,2,10],
        [2,6,3,2,7],
        [3,7,5,6,4,],
        [4,8,7,8,1] ]
    print("before transform:",X)
    scaler=MaxAbsScaler()
    scaler.fit(X)
    print("scale_ is :",scaler.scale_)
    print("max_abs_ is :",scaler.max_abs_)
    print("after transform:",scaler.transform(X))
def test_StandardScaler():
    '''
    Demonstrate the usage of StandardScaler (centers each feature column to
    zero mean and scales it to unit variance).

    :return: None
    '''
    X=[ [1,5,1,2,10],
        [2,6,3,2,7],
        [3,7,5,6,4,],
        [4,8,7,8,1] ]
    print("before transform:",X)
    scaler=StandardScaler()
    scaler.fit(X)
    print("scale_ is :",scaler.scale_)
    print("mean_ is :",scaler.mean_)
    print("var_ is :",scaler.var_)
    print("after transform:",scaler.transform(X))
if __name__=='__main__':
    test_MinMaxScaler() # run the MinMaxScaler demo
    # test_MaxAbsScaler() # run the MaxAbsScaler demo
    # test_StandardScaler() # run the StandardScaler demo (original comment mislabeled this line as MaxAbsScaler)
QuinnSong/JPG-Tools | src/word_cloud.py | 1 | 4013 | # -*- coding: cp936 -*-
from wordcloud import (WordCloud, get_single_color_func, STOPWORDS)
#import imageio
import jieba
import io
from PIL import Image
import numpy
from random import Random
import colorsys
def get_single_color_func(color_spec):
    """Build a color_func for WordCloud from a (color, randomize) pair.

    NOTE: this deliberately shadows wordcloud.get_single_color_func with a
    variant that supports random brightness.

    :param color_spec: tuple ``((r, g, b, a), enable_random_color)``.
        Channels are 0-255. When the flag is true, each word gets the same
        hue/saturation but a random brightness in [0.2, 1).
    :return: callable matching WordCloud's color_func signature, returning
        an ``'rgb(r, g, b)'`` string.

    Fixes: the original used Python-2-only tuple-parameter syntax (invalid
    in Python 3) and Python-2 integer division of the color channels, which
    truncated every non-255 channel to 0 before the HSV conversion. Callers
    still pass a single tuple argument, so the interface is unchanged.
    """
    (old_r, old_g, old_b, old_a), enable_random_color = color_spec
    rgb_max = 255
    # Convert once to HSV; the closure re-emits RGB per word.
    h, s, v = colorsys.rgb_to_hsv(old_r / float(rgb_max),
                                  old_g / float(rgb_max),
                                  old_b / float(rgb_max))

    def single_color_func(word=None, font_size=None, position=None,
                          orientation=None, font_path=None, random_state=None):
        """Random color generation
        random_state : random.Random object or None, (default=None)
            If a random object is given, this is used for generating random
            numbers.
        """
        if random_state is None:
            random_state = Random()
        if enable_random_color:
            # Same hue/saturation, random brightness (never too dark).
            r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1))
        else:
            r, g, b = colorsys.hsv_to_rgb(h, s, v)
        return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max,
                                                    g * rgb_max,
                                                    b * rgb_max)
    return single_color_func
def generate_word_cloud(words, font_path, font_color, bg_color, custom_stopwords, mask, allow_numbers = True, scale = 4, repeat = False):
    """Render `words` as a word cloud and return it as a PIL Image.

    :param words: whitespace-separated text to render
    :param font_path: path to a TTF font (required for CJK glyphs)
    :param font_color: ``((r,g,b,a), randomize_flag)`` pair consumed by
        get_single_color_func, or a falsy value for default coloring
    :param bg_color: background color string, or None for transparent
    :param custom_stopwords: extra stopwords merged with wordcloud defaults
    :param mask: numpy image array constraining the cloud shape, or None
    :param allow_numbers: include numeric tokens
    :param scale: render scale factor (higher = larger output image)
    :param repeat: repeat words until the canvas is filled
    """
    # use imageio to generate mask [optional]
    # mask = imageio.imread(image_file)
    # mask = numpy.array(Image.open(image_file).convert('RGBA'))
    sw = set(STOPWORDS)
    [ sw.add(i) for i in custom_stopwords ]
    word_cloud = WordCloud(
        #width = 400,
        #height = 200,
        # Larger canvases takes longer. If a large word cloud, try
        # a lower canvas size and set the scale parameter.
        font_path= font_path,  # example: 'c:/windows/Fonts/simhei.ttf'
        max_words = 2000,
        max_font_size = 100,
        color_func = get_single_color_func(font_color) if font_color else None, # can use random_red_color_func [default is None]
        background_color = bg_color, #'example: rgba(191,191,191,255)', 'white'[default is None]
        stopwords = sw,
        mask = mask,
        repeat = repeat,
        include_numbers = allow_numbers,
        mode = 'RGBA',
        scale = scale).generate(words)
    im = word_cloud.to_image()
    #im.show()
    #im.save(saved_name)
    #word_cloud.to_file(saved_name)
    return im
if __name__ == '__main__':
words = 'Paris, France, Houston, Italy, America, Roma, \
Austin, Seattle, Miami, London, Boston, Beijing, Shanghai, Macau, \
Moscow, Venice, Germany, Australia, Netherlands, Detroit'
words = u'±±¾©, Î÷°², ÉϺ£, ¹ãÖÝ, ´óÁ¬, ÖØÇì, Ìì½ò, ¼ÃÄÏ, ³¤É³, Ö£ÖÝ, ÌÆÉ½, ÎÞÎý, ÕżҿÚ, \
Çൺ, ±£¶¨, Ì«Ô, µ¤¶«, ¼ªÁÖ, ¹þ¶û±õ, ÄϾ©, º¼ÖÝ, ºÏ·Ê, Î人, º£¿Ú, ³É¶¼, À¥Ã÷, Î÷Äþ'
words = ' '.join(jieba.cut(words))
#generate_word_cloud(words, 'heart.png', 'word_cloud.png')
mask = numpy.array(Image.open('alice_mask.png').convert('RGBA'))
generate_word_cloud(words, 'c:/windows/Fonts/simhei.ttf', None, None, [], mask)#, 'word_cloud.png')
#generate_word_cloud(words, 'alice_mask.png', 'word_cloud.png')
import matplotlib.font_manager as fontman
font_list = fontman.findSystemFonts()
with open(u'wiki.txt') as words_file:
content = words_file.read()
import re
re.findall(r"\w[\w']+", content) | gpl-3.0 |
adamgreenhall/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is plotted against the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e353.py | 2 | 6431 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
def main():
    """Run each configured experiment, logging failures per experiment."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment returns source text such as "exp_a('e353a')";
        # eval resolves it against the builder functions defined above.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            raise
        finally:
            logging.shutdown()
if __name__ == "__main__":
main()
| mit |
untom/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
stefanodoni/mtperf | parsers/PCMParser.py | 2 | 2732 | import pandas as pd
from parsers.Parser import Parser
class PCMParser (Parser):
    """Parser for Intel PCM (Performance Counter Monitor) CSV exports."""

    # Fixed column layout of a PCM CSV dump: system-wide counters, then
    # socket 0, then cores 0-3 of socket 0.
    columns = ['SysDate', 'SysTime', 'SysEXEC', 'SysIPC', 'SysFREQ',
               'SysAFREQ', 'SysL3MISS', 'SysL2MISS', 'SysL3HIT', 'SysL2HIT',
               'SysL3MPI', 'SysL2MPI', 'SysREAD', 'SysWRITE', 'SysINST',
               'SysACYC', 'SysTIMEticks', 'SysPhysIPC', 'SysPhysIPCPerc', 'SysINSTnom', 'SysINSTnomPerc',
               'SysCoreCStatesC0res', 'SysCoreCStatesC1res', 'SysCoreCStatesC3res', 'SysCoreCStatesC6res', 'SysCoreCStatesC7res',
               'SysPackCStatesC2res', 'SysPackCStatesC3res', 'SysPackCStatesC6res', 'SysPackCStatesC7res', 'SysPackCStatesProcEnergy',
               'SKT0EXEC', 'SKT0IPC', 'SKT0FREQ', 'SKT0AFREQ', 'SKT0L3MISS',
               'SKT0L2MISS', 'SKT0L3HIT', 'SKT0L2HIT', 'SKT0L3MPI', 'SKT0L2MPI',
               'SKT0READ', 'SKT0WRITE', 'SKT0TEMP',
               'SKT0CoreCStateC0res', 'SKT0CoreCStateC1res', 'SKT0CoreCStateC3res', 'SKT0CoreCStateC6res', 'SKT0CoreCStateC7res',
               'SKT0PackCStateC2res', 'SKT0PackCStateC3res', 'SKT0PackCStateC6res', 'SKT0PackCStateC7res',
               'ProcEnergySKT0',
               'Core0Sock0EXEC', 'Core0Sock0IPC', 'Core0Sock0FREQ', 'Core0Sock0AFREQ', 'Core0Sock0L3MISS',
               'Core0Sock0L2MISS', 'Core0Sock0L3HIT', 'Core0Sock0L2HIT', 'Core0Sock0L3MPI', 'Core0Sock0L2MPI',
               'Core0Sock0C0res', 'Core0Sock0C1res', 'Core0Sock0C3res', 'Core0Sock0C6res', 'Core0Sock0C7res', 'Core0Sock0TEMP',
               'Core1Sock0EXEC', 'Core1Sock0IPC', 'Core1Sock0FREQ', 'Core1Sock0AFREQ', 'Core1Sock0L3MISS',
               'Core1Sock0L2MISS', 'Core1Sock0L3HIT', 'Core1Sock0L2HIT', 'Core1Sock0L3MPI', 'Core1Sock0L2MPI',
               'Core1Sock0C0res', 'Core1Sock0C1res', 'Core1Sock0C3res', 'Core1Sock0C6res', 'Core1Sock0C7res', 'Core1Sock0TEMP',
               'Core2Sock0EXEC', 'Core2Sock0IPC', 'Core2Sock0FREQ', 'Core2Sock0AFREQ', 'Core2Sock0L3MISS',
               'Core2Sock0L2MISS', 'Core2Sock0L3HIT', 'Core2Sock0L2HIT', 'Core2Sock0L3MPI', 'Core2Sock0L2MPI',
               'Core2Sock0C0res', 'Core2Sock0C1res', 'Core2Sock0C3res', 'Core2Sock0C6res', 'Core2Sock0C7res', 'Core2Sock0TEMP',
               'Core3Sock0EXEC', 'Core3Sock0IPC', 'Core3Sock0FREQ', 'Core3Sock0AFREQ', 'Core3Sock0L3MISS',
               'Core3Sock0L2MISS', 'Core3Sock0L3HIT', 'Core3Sock0L2HIT', 'Core3Sock0L3MPI', 'Core3Sock0L2MPI',
               'Core3Sock0C0res', 'Core3Sock0C1res', 'Core3Sock0C3res', 'Core3Sock0C6res', 'Core3Sock0C7res', 'Core3Sock0TEMP']

    def parse(self, file):
        """Parse a PCM CSV file into a DataFrame.

        The first two columns (SysDate, SysTime) are merged into a single
        timestamp column named Parser.TIMESTAMP_STR.

        Fixes: the file handle leaked if read_csv raised (close() was only
        reached on success) -- a context manager closes it in all cases;
        ``pd.datetime`` (a deprecated alias removed in pandas 2.x) is
        replaced with ``datetime`` from the standard library.
        """
        with open(file, 'rb') as csvfile:
            dataframe = pd.read_csv(
                csvfile, sep=';', header=None, skiprows=2,
                names=self.columns, decimal='.', index_col=False,
                parse_dates={Parser.TIMESTAMP_STR: [0, 1]},
                date_parser=lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'))
        return dataframe
| gpl-2.0 |
CA-Lab/moral-exchange | Data/hebb/3_testing_hebb.py | 1 | 4869 | import argparse
import matplotlib
#matplotlib.use('TkAgg')
matplotlib.use('svg')
import matplotlib.pyplot as plt
import pylab as pl
import random as rd
import scipy as sp
import networkx as nx
import numpy as np
import math as mt
import pprint as ppt
parser = argparse.ArgumentParser(description='Hebbian network simulation')
parser.add_argument('--runid', required=True )
args = parser.parse_args()
file_num = 0
time_list = []
energy_state_g = []
energy_state_o = []
perturbation_period = 1000
pert_accu = 0
time = 0
T = 0
m = []
T_list = [0, ]
U_plot = [0, ]
g = nx.complete_graph(120)
o = nx.complete_graph(120)
#GU = open('gu_%d.txt' %file_num, 'w')
def init_full():
    """Initialize the module-level graphs on the 120-node complete graph:
    g gets all learned weights zeroed, o (a copy of g) gets ~7% of its
    edges set to an innate weight of +1 or -1. Writes both edge lists to
    run-tagged CSV files."""
    global g, o, file_num
    #file_num += 1
    #print file_num
    randomize_states(g)
    for i,j in g.edges():
        g.edge[i][j]['weight'] = 0
    o = g.copy()
    for i,j in o.edges():
        if rd.random() < 0.07:
            o.edge[i][j]['weight'] = rd.choice([1,-1])
    nx.write_weighted_edgelist(g, 'run_%s_g_edgelist_%d.csv' % (args.runid, file_num))
    nx.write_weighted_edgelist(o, 'run_%s_o_edgelist_%d.csv' % (args.runid, file_num))
def init_erdos():
    """Reinitialize g as an Erdos-Renyi graph (p=1, i.e. complete) with
    zeroed weights, and o as a copy with ~7% of edges set to +/-1.
    Unlike init_full, this does not dump the edge lists."""
    global g, o
    g = nx.erdos_renyi_graph(120, 1)
    randomize_states(g)
    for i,j in g.edges():
        g.edge[i][j]['weight'] = 0
    o = g.copy()
    for i,j in o.edges():
        if rd.random() < 0.07:
            o.edge[i][j]['weight'] = rd.choice([-1,1])
def init_small_world():
    """Reinitialize g as a Watts-Strogatz small-world graph (120 nodes,
    k=8, p=0.5) with zeroed weights, and o as a copy with ~7% of edges
    set to +/-1.

    Bug fix: the sibling initializers declare ``global g, o`` but this one
    did not, so it only rebound local names and left the module-level
    graphs untouched -- calling it had no effect.
    """
    global g, o
    g = nx.watts_strogatz_graph(120, 8, 0.5)
    randomize_states(g)
    for i,j in g.edges():
        g.edge[i][j]['weight'] = 0
    o = g.copy()
    for i,j in o.edges():
        if rd.random() < 0.07:
            o.edge[i][j]['weight'] = rd.choice([-1,1])
def draw():
    """Render the current network state with matplotlib, coloring nodes by
    their +/-1 state.

    NOTE(review): `positions` is not defined anywhere in the visible
    module -- confirm it is assigned elsewhere before draw() is called,
    otherwise this raises NameError.
    """
    pl.cla()
    nx.draw(g, pos = positions,
            node_color = [g.node[i]['s'] for i in g.nodes_iter()],
            with_labels = True, edge_color = 'c',
            #width = [g.edge[i][j]['weight'] for (i,j) in g.edges_iter()],
            cmap = pl.cm.autumn, vmin = 0, vmax = 1)
    pl.axis('image')
    pl.title('t = ' + str(time))
    plt.show()
def randomize_states( o ):
    """Give every node of *o* an independent random state of +1 or -1."""
    for node in o.nodes():
        o.node[node]['s'] = rd.choice([1,-1])
def local_uo(i, o):
    """Return the local utility of node *i* in graph *o*.

    This is the sum over the neighbours j of w_ij * s_i * s_j, i.e. how
    well i's state agrees with its neighbours under o's edge weights.
    """
    s_i = o.node[i]['s']
    total = 0
    for j in o.neighbors(i):
        total += o.edge[i][j]['weight'] * s_i * o.node[j]['s']
    return total
def global_uo(o):
    """Return the global utility of *o*: the sum of local_uo over all nodes."""
    return sum(local_uo(i, o) for i in o.nodes())
def node_state(i):
    """Greedily update node *i* of the objective graph o.

    For each candidate state (-1 and +1) the alignment with the
    neighbours is scored using the combined edge weights of o and g;
    node i is set to whichever state scores higher (ties go to +1).
    """
    global g, o
    score_minus = 0
    score_plus = 0
    for j in o.neighbors(i):
        w = o.edge[i][j]['weight'] + g.edge[i][j]['weight']
        sj = o.node[j]['s']
        score_minus += w * -1 * sj
        score_plus += w * 1 * sj
    o.node[i]['s'] = -1 if score_minus > score_plus else 1
def step():
    """Advance the simulation by one asynchronous node update.

    Every ``perturbation_period`` updates an epoch ends: the Hebbian
    learning sweep runs (only while 600 < T < 3000), the epoch counter
    and global-utility trace are updated, and o's states are
    re-randomized. In every case one randomly chosen node of o is then
    updated via node_state().
    """
    global time, o, g, T, perturbation_period, pert_accu, file_num
    time += 1
    if pert_accu == perturbation_period:
        # Epoch boundary: learn (within the training window), log, perturb.
        if 600 < T < 3000:
            learning()
        pert_accu = 0
        T += 1
        T_list.append(T)
        U_plot.append(global_uo(o))
        randomize_states(o)
    else:
        pert_accu += 1
    node_state(rd.choice(o.nodes()))
def learning():
    """One Hebbian learning sweep over the nodes of o.

    For each node i, two aggregate alignment scores are computed over all
    neighbours: m_1 with the combined weight nudged up by the learning
    rate r, and m_2 with it nudged down. The weight of g is then adjusted
    by +/-r in the direction that scored higher.
    """
    global g, o
    r = 0.005  # learning rate
    for i in o.nodes():
        m_1 = 0
        for j in o.neighbors(i):
            m_1 += (g.edge[i][j]['weight'] + o.edge[i][j]['weight'] + r) * o.node[i]['s'] * o.node[j]['s']
        m_2 = 0
        for j in o.neighbors(i):
            m_2 += (g.edge[i][j]['weight'] + o.edge[i][j]['weight'] - r) * o.node[i]['s'] * o.node[j]['s']
        # NOTE(review): this update sits outside the neighbour loops, so it
        # uses the leaked loop variable j and adjusts only the edge to the
        # *last* neighbour of i, even though m_1/m_2 aggregate over all
        # neighbours. Possibly an indentation bug (per-edge update
        # intended?) — confirm against the experiment's design.
        if m_1 > m_2:
            g.edge[i][j]['weight'] += r
        else:
            g.edge[i][j]['weight'] -= r
def no_draw():
    """Text-only progress report: print the current simulation time."""
    print(time)
def data():
    """Persist the final state of a run.

    Writes the end-of-run edge lists of g and o, plus the final global
    utility of o, all tagged with the run id and file number.
    """
    global time, o, g, file_num
    nx.write_weighted_edgelist(g, 'run_%s_g_edgelist_end_%d.csv' % (args.runid, file_num))
    nx.write_weighted_edgelist(o, 'run_%s_o_edgelist_end_%d.csv' % (args.runid, file_num))
    with open('run_%s_gu_%d.txt' % (args.runid, file_num), 'w') as report:
        report.write(str(global_uo(o)))
# --- main ---------------------------------------------------------------
# One full run on the complete-graph topology: advance the simulation for
# perturbation_period * 3600 asynchronous updates (3600 epochs), dump the
# final state, then plot global utility against the epoch counter T.
init_full()
for n in xrange(perturbation_period * 3600):
    # no_draw()
    step()
data()
plt.cla()
#plt.plot(time_list, energy_state_g, 'b+')
#plt.plot(time_list, energy_state_o, 'r-')
plt.scatter( T_list, U_plot, c=u'r', marker=u'D' )  # one point per epoch
plt.xlabel('Time')
plt.ylabel('Global Utility')
plt.savefig('run_%s_learning_plot_full.svg' % args.runid)
#plt.savefig('learning_plot_small.svg')
#plt.savefig('learning_plot_erdos.svg')
| gpl-3.0 |
pv/scikit-learn | sklearn/linear_model/least_angle.py | 57 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
# scipy >= 0.12 lets us skip the costly finiteness check inside
# linalg.solve_triangular; older versions do not accept the keyword,
# so an empty kwargs dict is used there.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.

    y : array, shape: (n_samples)
        Input targets.

    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.

    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.

    verbose : int (default=0)
        Controls output verbosity.

    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.

    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Effron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_

    """
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)

    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?

    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that it not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))

    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()

    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()

    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps

    # Main path loop: each pass either adds a variable to the active set
    # (LAR step) or, for the lasso, may drop one whose coefficient crossed
    # zero, until alpha_min / max_iter / all-features is reached.
    while True:
        if Cov.size:
            C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            C = np.fabs(C_)
        else:
            C = 0.

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break

        if not drop:

            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.

                # Note: this case is very rare. It is no longer triggered by the
                # test suite. The `equality_tolerance` margin added in 0.16.0 to
                # get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)

                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))

        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break

        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each unactive variables and
            # eqiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)

        # Step length: smallest positive gamma at which another variable's
        # correlation catches up with the active set's.
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
        gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == 'lasso':
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == 'lasso':

            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]

            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]

    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        # Parameters are stored verbatim (scikit-learn convention: no
        # validation in __init__). Subclasses override self.method.
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path

    def _get_gram(self):
        # Resolve the ``precompute`` parameter into the Gram argument
        # expected by lars_path: an explicit array, the string 'auto',
        # or None (no precomputation).
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram

    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.

        parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.

        returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)

        n_features = X.shape[1]

        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_targets = y.shape[1]

        # Lars exposes n_nonzero_coefs, LassoLars exposes alpha/max_iter;
        # the attribute check selects the right parametrization here.
        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter

        precompute = self.precompute
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            # Compute the Gram matrix once here so it can be shared
            # across the per-target lars_path calls below.
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()

        self.alphas_ = []
        self.n_iter_ = []

        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            # One lars_path call per target column; full paths retained.
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])

            # Single-target convenience: unwrap the one-element lists.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            # Only the end point of each path is kept (faster, less memory).
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]

        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    It is a Linear Model trained with an L1 prior as regularizer.

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, precompute='auto',
         verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.         -0.963257...]

    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        # Store constructor parameters unchanged (scikit-learn convention:
        # no validation or conversion in __init__).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        # Fixing method to 'lasso' is what turns the inherited Lars.fit
        # into an L1-penalized fit (see lars_path).
        self.method = 'lasso'
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.

    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    verbose : integer, optional
        Sets the amount of verbosity

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.

    active : list
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path

    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    # All centering/scaling below is done IN PLACE, hence the up-front
    # copies when the caller's arrays must not be modified (or are
    # read-only, e.g. memmapped CV folds).
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)

    if fit_intercept:
        # Center both folds with the TRAINING means only, so the test
        # residues measure true out-of-sample error.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
    if normalize:
        # Rescale coefficients back to the original feature scale so they
        # can be applied to the (unnormalized) test data.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Runs the LARS path on each cross-validation fold, interpolates all
        fold residuals onto a common alpha grid, picks the alpha with the
        smallest mean left-out MSE, then refits on the full data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)

        # Only precompute the Gram matrix when the user asked for it.
        Gram = 'auto' if self.precompute else None

        # One LARS path per CV fold; each element of cv_paths is a tuple
        # (alphas, active, coefs, residues) from _lars_path_residues.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values (stride the sorted grid).
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]

        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            # Paths come out in decreasing alpha; flip to increasing for
            # interp1d, and pad both ends so the common grid is covered.
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            # Interpolate this fold's residuals onto the shared alpha grid.
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)

        # Drop grid points where any fold produced a non-finite MSE.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]

        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]

        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path

        # Now compute the full model
        # it will call a lasso internally when self if LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self
    @property
    def alpha(self):
        # Impedance matching for the above Lars.fit: Lars.fit reads
        # ``self.alpha``; expose the CV-selected ``alpha_`` under that name.
        # (Deliberately undocumented — not part of the public API.)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, default to
        a 5-fold strategy

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it find the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.

    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.

    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """

    # Everything is inherited from LarsCV; switching `method` makes the
    # shared fitting machinery follow the lasso variant of the LARS path.
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    alpha_ : float
        the alpha parameter chosen by the information criterion

    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.

    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, precompute='auto',
          verbose=False)
    >>> print(clf.coef_)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    # NOTE(review): ``np.finfo(np.float).eps`` and ``dtype=np.int`` below use
    # aliases removed in NumPy 1.24 / deprecated in 1.20 — this code targets
    # an older NumPy; confirm before running on a modern stack.
    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True):
        # Store hyper-parameters only; all work is deferred to ``fit``.
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps

    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Computes the full lasso path with LARS, evaluates AIC or BIC at
        every knot of the path, and keeps the coefficients at the knot
        with the smallest criterion value.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.

        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # Center (and optionally normalize) so the path is computed on
        # centered data; the intercept is restored at the end.
        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        # alpha_min=0 walks the full lasso path down to the OLS end.
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True)

        n_samples = X.shape[0]

        # Criterion weight: AIC penalizes each df by 2, BIC by log(n).
        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)

        df = np.zeros(coef_path_.shape[1], dtype=np.int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        # log(0) can occur for a perfect fit; ignore the divide warning —
        # -inf simply makes that knot win the argmin below.
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
| bsd-3-clause |
pauldeng/nilmtk | nilmtk/timeframe.py | 6 | 10648 | from __future__ import print_function, division
import pandas as pd
import pytz
from datetime import timedelta
from copy import deepcopy
from warnings import warn
class TimeFrame(object):
    """A TimeFrame is a single time span or period,
    e.g. from "2013" to "2014".

    Attributes
    ----------
    _start : pd.Timestamp or None
        if None and `empty` is False
        then behave as if start is infinitely far into the past
    _end : pd.Timestamp or None
        if None and `empty` is False
        then behave as if end is infinitely far into the future
    enabled : boolean
        If False then behave as if both _end and _start are None
    _empty : boolean
        If True then represents an empty time frame
    include_end : boolean
        If True the interval is closed on the right; otherwise half-open.
    """

    def __init__(self, start=None, end=None, tz=None):
        self.clear()
        if isinstance(start, TimeFrame):
            # Copy constructor: duplicate the state of another TimeFrame.
            self.copy_constructor(start)
        else:
            self.start = start
            self.end = end
            self.include_end = False
            if tz is not None:
                if self._start:
                    self._start = self._start.tz_localize(tz)
                if self._end:
                    self._end = self._end.tz_localize(tz)

    def copy_constructor(self, other):
        # BUG FIX: the original used `other.__dict__.iteritems()`, which only
        # exists on Python 2 and raises AttributeError on Python 3.
        # `items()` behaves identically on both.
        for key, value in other.__dict__.items():
            setattr(self, key, value)

    def clear(self):
        """Reset to an open-ended, enabled, non-empty timeframe."""
        self.enabled = True
        self._start = None
        self._end = None
        self._empty = False

    @classmethod
    def from_dict(cls, d):
        """Build a TimeFrame from a dict with optional 'start'/'end' strings."""
        def key_to_timestamp(key):
            string = d.get(key)
            return None if string is None else pd.Timestamp(string)
        start = key_to_timestamp('start')
        end = key_to_timestamp('end')
        return cls(start, end)

    @property
    def start(self):
        # Returns None when disabled (behave as if open-ended).
        if self.enabled:
            return self._start

    @property
    def end(self):
        # Returns None when disabled (behave as if open-ended).
        if self.enabled:
            return self._end

    @property
    def empty(self):
        return self._empty

    @start.setter
    def start(self, new_start):
        new_start = convert_nat_to_none(new_start)
        if new_start is None:
            self._start = None
            return
        new_start = pd.Timestamp(new_start)
        if self.end and new_start >= self.end:
            raise ValueError("start date must be before end date")
        else:
            self._start = new_start

    @end.setter
    def end(self, new_end):
        new_end = convert_nat_to_none(new_end)
        if new_end is None:
            self._end = None
            return
        new_end = pd.Timestamp(new_end)
        if self.start and new_end <= self.start:
            raise ValueError("end date must be after start date")
        else:
            self._end = new_end

    def adjacent(self, other, gap=0):
        """Returns True if self.start == other.end or visa versa.

        Parameters
        ----------
        gap : float or int
            Number of seconds gap allowed.

        Notes
        -----
        Does not yet handle case where self or other is open-ended.
        """
        assert gap >= 0
        gap_td = timedelta(seconds=gap)
        if self.empty or other.empty:
            return False
        return (other.start - gap_td <= self.end <= other.start or
                self.start - gap_td <= other.end <= self.start)

    def union(self, other):
        """Return a single TimeFrame combining self and other."""
        start = min(self.start, other.start)
        end = max(self.end, other.end)
        return TimeFrame(start, end)

    @property
    def timedelta(self):
        # Returns None when either bound is open-ended and not empty.
        if self.end and self.start:
            return self.end - self.start
        elif self.empty:
            return timedelta(0)

    def intersection(self, other):
        """Returns a new TimeFrame of the intersection between
        this TimeFrame and `other` TimeFrame.
        If the intersect is empty then the returned TimeFrame
        will have empty == True."""
        if other is None:
            return deepcopy(self)

        assert isinstance(other, TimeFrame)

        include_end = False
        if self.empty or other.empty:
            start = None
            end = None
            empty = True
        else:
            # None means "unbounded", so the other frame's bound wins.
            if other.start is None:
                start = self.start
            elif self.start is None:
                start = other.start
            else:
                start = max(self.start, other.start)

            if other.end is None:
                end = self.end
            elif self.end is None:
                end = other.end
            else:
                end = min(self.end, other.end)

            # set include_end: inherit from whichever frame supplied `end`.
            if end == other.end:
                include_end = other.include_end
            elif end == self.end:
                include_end = self.include_end

            empty = False
            if (start is not None) and (end is not None):
                if start >= end:
                    start = None
                    end = None
                    empty = True

        intersect = TimeFrame(start, end)
        intersect._empty = empty
        intersect.include_end = include_end
        return intersect

    def query_terms(self, variable_name='timeframe'):
        """Return pandas HDFStore query terms selecting this timeframe."""
        if self.empty:
            raise Exception("TimeFrame is empty.")
        terms = []
        if self.start is not None:
            terms.append("index>=" + variable_name + ".start")
        if self.end is not None:
            terms.append("index<" + ("=" if self.include_end else "")
                         + variable_name + ".end")
        return None if terms == [] else terms

    def slice(self, frame):
        """Slices `frame` using self.start and self.end.

        Parameters
        ----------
        frame : pd.DataFrame or pd.Series to slice

        Returns
        -------
        frame : sliced frame
        """
        # NOTE(review): if this timeframe is empty, `sliced` is never bound
        # and the attribute assignment below raises NameError — confirm
        # whether an empty slice fallback was intended upstream.
        if not self.empty:
            if self.include_end:
                sliced = frame[(frame.index >= self.start) &
                               (frame.index <= self.end)]
            else:
                sliced = frame[(frame.index >= self.start) &
                               (frame.index < self.end)]
        sliced.timeframe = self
        return sliced

    def __nonzero__(self):
        # A timeframe is truthy when it has at least one bound and is not
        # empty. (Python 2 protocol name.)
        if self.empty:
            return False
        else:
            return (self.start is not None) or (self.end is not None)

    # BUG FIX: Python 3 renamed `__nonzero__` to `__bool__`; without this
    # alias every TimeFrame is truthy on Python 3, silently breaking the
    # `if not self` guard in `split()`.
    __bool__ = __nonzero__

    def __repr__(self):
        return ("TimeFrame(start='{}', end='{}', empty={})"
                .format(self.start, self.end, self.empty))

    def __eq__(self, other):
        return ((other.start == self.start) and
                (other.end == self.end) and
                (other.empty == self.empty))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.start, self.end, self.empty))

    def to_dict(self):
        """Serialise to a dict of ISO-8601 strings (bounds omitted if None)."""
        dct = {}
        if self.start:
            dct['start'] = self.start.isoformat()
        if self.end:
            dct['end'] = self.end.isoformat()
        return dct

    def check_tz(self):
        # Warn when a fixed-offset timezone is used; pandas used to choke on
        # mixed fixed offsets within one block.
        if any([isinstance(tf.tz, pytz._FixedOffset)
                for tf in [self.start, self.end]
                if tf is not None]):
            warn("Using a pytz._FixedOffset timezone may cause issues"
                 " (e.g. might cause Pandas to raise 'TypeError: too many"
                 " timezones in this block, create separate data columns'). "
                 " It is better to set the timezone to a geographical location"
                 " e.g. 'Europe/London'.")

    def check_for_overlap(self, other):
        """Raise ValueError if this timeframe overlaps `other`."""
        intersect = self.intersection(other)
        if not intersect.empty:
            raise ValueError("Periods overlap: " + str(self) +
                             " " + str(other))

    def split(self, duration_threshold):
        """Splits this TimeFrame into smaller adjacent TimeFrames no
        longer in duration than duration_threshold.

        Parameters
        ----------
        duration_threshold : int, seconds

        Returns
        -------
        generator of new TimeFrame objects
        """
        if not self:
            raise ValueError("Cannot split a TimeFrame if `start` or `end`"
                             " is None")
        if duration_threshold >= self.timedelta.total_seconds():
            yield self
            return
        duration_threshold_td = timedelta(seconds=duration_threshold)
        timeframe = self
        while True:
            allowed_end = timeframe.start + duration_threshold_td
            if timeframe.end <= allowed_end:
                yield timeframe
                break
            else:
                yield TimeFrame(start=timeframe.start, end=allowed_end)
                timeframe = TimeFrame(start=allowed_end, end=timeframe.end)
def split_timeframes(timeframes, duration_threshold):
    """Yield each timeframe split into pieces of at most
    `duration_threshold` seconds."""
    # TODO: put this into TimeFrameGroup.  #316
    for source_timeframe in timeframes:
        for piece in source_timeframe.split(duration_threshold):
            yield piece
def merge_timeframes(timeframes, gap=0):
    """Merge adjacent timeframes in a sorted list.

    Parameters
    ----------
    timeframes : list of TimeFrame objects (must be sorted)
    gap : float or int, seconds
        Maximum gap between timeframes still considered adjacent.

    Returns
    -------
    merged : list of TimeFrame objects
        Where adjacent timeframes have been merged.
    """
    # TODO: put this into TimeFrameGroup.  #316
    assert isinstance(timeframes, list)
    assert all(isinstance(tf, TimeFrame) for tf in timeframes)

    # Zero or one timeframes: nothing to merge.
    if len(timeframes) < 2:
        return [] if not timeframes else timeframes

    merged = [timeframes[0]]
    for candidate in timeframes[1:]:
        if candidate.adjacent(merged[-1], gap):
            # Extend the most recent merged timeframe in place.
            merged[-1] = candidate.union(merged[-1])
        else:
            merged.append(candidate)
    return merged
def list_of_timeframe_dicts(timeframes):
    """Serialise every timeframe to a dict.

    Parameters
    ----------
    timeframes : list of TimeFrame objects

    Returns
    -------
    list of dicts
    """
    # TODO: put this into TimeFrameGroup.  #316
    dicts = []
    for timeframe in timeframes:
        dicts.append(timeframe.to_dict())
    return dicts
def timeframe_from_dict(d):
    """Deserialise a dict (optional 'start'/'end' ISO strings) to a TimeFrame."""
    return TimeFrame.from_dict(d)
def list_of_timeframes_from_list_of_dicts(dicts):
    """Deserialise a list of dicts into a list of TimeFrame objects."""
    # TODO: put this into TimeFrameGroup.  #316
    return list(map(timeframe_from_dict, dicts))
def convert_none_to_nat(timestamp):
    """Map None to pandas NaT; pass any other value through unchanged."""
    if timestamp is None:
        return pd.NaT
    return timestamp
def convert_nat_to_none(timestamp):
    """Map pandas NaT to None; pass any other value through unchanged."""
    if timestamp is pd.NaT:
        return None
    return timestamp
| apache-2.0 |
ngoix/OCRF | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause

import time

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

# scipy.misc.face() only exists from SciPy 0.12 onwards.
if sp_version < (0, 12):
    raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
                   "thus does not include the scipy.misc.face() image.")


# load the raccoon face as a numpy array
try:
    face = sp.face(gray=True)
except AttributeError:
    # Newer versions of scipy have face in misc
    from scipy import misc
    face = misc.face(gray=True)

# Resize it to 10% of the original size to speed up the processing.
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — confirm the
# targeted SciPy version before running.
face = sp.misc.imresize(face, 0.10) / 255.

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi.
# eps keeps edge weights strictly positive.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25

#############################################################################
# Visualize the resulting regions

# Compare both label-assignment strategies of spectral clustering.
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels, random_state=1)
    t1 = time.time()
    labels = labels.reshape(face.shape)
    plt.figure(figsize=(5, 5))
    plt.imshow(face, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # Draw the boundary of each region on top of the grayscale image.
        # NOTE(review): `contours=1` is not a documented Matplotlib contour
        # kwarg (and `plt.cm.spectral` was removed in Matplotlib 2.2) —
        # confirm against the targeted Matplotlib version.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS))])
    plt.xticks(())
    plt.yticks(())
    title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
    print(title)
    plt.title(title)
plt.show()
| bsd-3-clause |
nuclear-wizard/moose | test/tests/samplers/distribute/execute.py | 3 | 2296 | #!/usr/bin/env python3
from __future__ import print_function
import time
import argparse
import pandas
import matplotlib.pyplot as plt
import multiprocessing
import mooseutils
def execute(infile, outfile, n_samples, processors, test_type):
    """Run the memory study once per MPI rank count and write a CSV summary.

    Parameters
    ----------
    infile : str
        MOOSE input file name (``.i`` extension assumed).
    outfile : str
        Base name (without extension) of the summary CSV to write.
    n_samples : int
        Number of sampler rows requested per run.
    processors : iterable of int
        MPI rank counts to run with.
    test_type : str
        Value passed to ``Postprocessors/test/test_type``.
    """
    columns = ['n_procs', 'n_samples', 'total', 'per_proc', 'max_proc', 'time']
    rows = []
    exe = mooseutils.find_moose_executable_recursive()
    for n_procs in processors:
        # One output file base per rank count so runs do not clobber each other.
        file_base = '{}_{}'.format(infile[:-2], n_procs)
        exe_args = ['-i', infile,
                    'Outputs/file_base={}'.format(file_base),
                    'Postprocessors/test/test_type={}'.format(test_type),
                    'Samplers/sampler/num_rows={}'.format(int(n_samples))]
        print('mpiexec -n {} {} {}'.format(n_procs, exe, ' '.join(exe_args)))
        start = time.time()
        mooseutils.run_executable(exe, exe_args, mpi=n_procs,
                                  suppress_output=True)
        elapsed = time.time() - start

        # Harvest the last row of the per-run postprocessor CSV.
        local = pandas.read_csv('{}.csv'.format(file_base))
        rows.append({'n_procs': n_procs,
                     'n_samples': n_samples,
                     'total': local['total'].iloc[-1],
                     'per_proc': local['per_proc'].iloc[-1],
                     'max_proc': local['max_proc'].iloc[-1],
                     'time': elapsed})

    df = pandas.DataFrame(rows, columns=columns)
    df.to_csv('{}.csv'.format(outfile), index=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Memory data for Sampler sample data methods.")
    # NOTE(review): default=1e5 is a float and argparse does not apply
    # type=int to defaults, so args.rows is 100000.0 unless -r is given —
    # confirm this is intended (execute() casts via int() anyway).
    parser.add_argument('-r', '--rows', default=1e5, type=int, help="The number of rows (default: 1e5).")
    parser.add_argument('-p', '--processors', default=[1,4], type=int, nargs='+', help="List of number of processors to use (default: [1,4]).")
    args = parser.parse_args()

    # One study per sampler access mode; each writes its own summary CSV.
    execute('distribute.i', 'distribute_none', 1, args.processors, 'getGlobalSamples')
    execute('distribute.i', 'distribute_off', args.rows, args.processors, 'getGlobalSamples')
    execute('distribute.i', 'distribute_on', args.rows, args.processors, 'getLocalSamples')
    execute('distribute.i', 'distribute_row', args.rows, args.processors, 'getNextLocalRow')
| lgpl-2.1 |
RomainBrault/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 26 | 7393 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_no_fit_attributes_set_in_init
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
    """Exception class to raise if estimator is used before fitting.

    Like NotFittedError, it inherits from ValueError, but not from
    AttributeError. Used for testing only.
    """
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
    """Deliberately broken classifier: ``fit`` performs no input validation.

    Used by the tests below to verify that ``check_estimator`` flags
    estimators that skip ``check_X_y``.
    """

    def fit(self, X, y):
        # No validation on purpose — this is the defect under test.
        return self

    def predict(self, X):
        # Constant prediction: a single "1" per sample.
        predictions = np.ones(X.shape[0])
        return predictions
class ChangesDict(BaseEstimator):
    """Estimator that mutates its ``__dict__`` during ``predict``.

    ``check_estimator`` must reject estimators whose state changes at
    prediction time; this mock triggers that check.
    """

    def __init__(self):
        self.key = 0

    def fit(self, X, y=None):
        X, y = check_X_y(X, y)
        return self

    def predict(self, X):
        X = check_array(X)
        # Forbidden: state mutation outside of fit — the defect under test.
        self.key = 1000
        return np.ones(X.shape[0])
class SetsWrongAttribute(BaseEstimator):
    """Estimator whose ``fit`` adds a public attribute (no trailing ``_``).

    Fitted attributes must end with an underscore; this mock violates that
    convention so ``check_estimator`` can detect it.
    """

    def __init__(self):
        self.acceptable_key = 0

    def fit(self, X, y=None):
        # Added before validation, exactly as in the scenario under test.
        self.wrong_attribute = 0
        X, y = check_X_y(X, y)
        return self
class ChangesWrongAttribute(BaseEstimator):
    """Estimator whose ``fit`` changes a public init-time attribute.

    ``fit`` may only modify attributes that start or end with an
    underscore; this mock changes ``wrong_attribute`` to trip the check.
    """

    def __init__(self):
        self.wrong_attribute = 0

    def fit(self, X, y=None):
        # Mutating a public attribute inside fit — the defect under test.
        self.wrong_attribute = 1
        X, y = check_X_y(X, y)
        return self
class NoCheckinPredict(BaseBadClassifier):
    """Mock whose ``fit`` validates input but whose inherited ``predict``
    does not (so NaN/inf slip through at prediction time)."""

    def fit(self, X, y):
        validated = check_X_y(X, y)
        X, y = validated
        return self
class NoSparseClassifier(BaseBadClassifier):
    """Classifier mock that accepts sparse input in validation but then
    raises a meaningless error, to test graceful sparse-failure handling."""

    def fit(self, X, y):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
        if sp.issparse(X):
            # Not an informative message on purpose — the check expects a
            # graceful, explanatory failure and must flag this one.
            raise ValueError("Nonsensical Error")
        return self

    def predict(self, X):
        checked = check_array(X)
        return np.ones(checked.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    """Well-behaved mock: raises CorrectNotFittedError (a ValueError
    subclass) when ``predict`` is called before ``fit``."""

    def fit(self, X, y):
        X, y = check_X_y(X, y)
        # Presence of coef_ marks the estimator as fitted.
        self.coef_ = np.ones(X.shape[1])
        return self

    def predict(self, X):
        # Guard clause: unfitted use must raise the custom error.
        if not hasattr(self, 'coef_'):
            raise CorrectNotFittedError("estimator is not fitted yet")
        X = check_array(X)
        return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
    """Estimator that rejects ``sample_weight`` given as a pandas Series.

    check_estimator must verify that estimators accept Series weights;
    this mock fails that requirement on purpose.
    """

    def fit(self, X, y, sample_weight=None):
        # Convert data
        X, y = check_X_y(X, y,
                         accept_sparse=("csr", "csc"),
                         multi_output=True,
                         y_numeric=True)
        # Function is only called after we verify that pandas is installed
        from pandas import Series
        if isinstance(sample_weight, Series):
            raise ValueError("Estimator does not accept 'sample_weight'"
                             "of type pandas.Series")
        return self

    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
def test_check_estimator():
    """Smoke-test check_estimator against the broken mocks above.

    Tests that the estimator checks actually fail on "bad" estimators —
    not a complete test of all checks, which are very extensive.
    """
    # check that we have a set_params and can clone
    msg = "it does not implement a 'get_params' methods"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
    # check that fit does input validation
    msg = "TypeError not raised"
    assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
    # check that sample_weights in fit accepts pandas.Series type
    # (skipped silently when pandas is not installed)
    try:
        from pandas import Series  # noqa
        msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
               "'sample_weight' parameter is of type pandas.Series")
        assert_raises_regex(
            ValueError, msg, check_estimator, NoSampleWeightPandasSeriesType)
    except ImportError:
        pass
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
    # check that estimator state does not change
    # at transform/predict/predict_proba time
    msg = 'Estimator changes __dict__ during predict'
    assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
    # check that `fit` only changes attributes that
    # are private (start with an _ or end with a _).
    msg = ('Estimator changes public attribute\(s\) during the fit method.'
           ' Estimators are only allowed to change attributes started'
           ' or ended with _, but wrong_attribute changed')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, ChangesWrongAttribute)
    # check that `fit` doesn't add any public attribute
    msg = ('Estimator adds public attribute\(s\) during the fit method.'
           ' Estimators are only allowed to add private attributes'
           ' either started with _ or ended'
           ' with _ but wrong_attribute added')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, SetsWrongAttribute)
    # check for sparse matrix input handling
    name = NoSparseClassifier.__name__
    msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier)
    # NOTE(review): bare except deliberately swallows everything so stdout is
    # always restored; the real assertion happens on the captured buffer.
    except:
        pass
    finally:
        sys.stdout = old_stdout
    assert_true(msg in string_buffer.getvalue())

    # doesn't error on actual (well-behaved) estimators
    check_estimator(AdaBoostClassifier)
    check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
    """Exercise the unfitted-predict check in both directions."""
    # check that a ValueError/AttributeError is raised when calling predict
    # on an unfitted estimator (NoSparseClassifier raises neither)
    msg = "AttributeError or ValueError not raised by predict"
    assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
                        "estimator", NoSparseClassifier)

    # check that CorrectNotFittedError (which inherits from ValueError)
    # passes the check without raising
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
def test_check_no_fit_attributes_set_in_init():
    """Estimators must not set trailing-underscore attributes in __init__."""
    class NonConformantEstimator(object):
        def __init__(self):
            # Fitted-style attribute set at construction time — forbidden.
            self.you_should_not_set_this_ = None

    # The expected message is a regex; '.+' bridges the variable parts.
    msg = ("By convention, attributes ending with '_'.+"
           'should not be initialized in the constructor.+'
           "Attribute 'you_should_not_set_this_' was found.+"
           'in estimator estimator_name')
    assert_raises_regex(AssertionError, msg,
                        check_no_fit_attributes_set_in_init,
                        'estimator_name',
                        NonConformantEstimator)
| bsd-3-clause |
Irsan88/SeqTools | DataProcessing/roda/tags/2.0/bin/scripts/plotHomHetRegions.py | 6 | 1692 | import sys
import matplotlib.pyplot as plt
# Python 2 script (uses `print` statements).
# Usage: plotHomHetRegions.py <input.vcf> <output_prefix>
# Reads allele frequencies (INFO AF field) from a VCF and plots them per
# chromosome, four chromosomes per figure, saved as <prefix>_<n>.png.
vcfIn = open(sys.argv[1], 'r')

positions = {}       # chrom -> list of [position, allele-frequency-string]
allChroms = []       # chromosome names in order of first appearance
lengthContigs = {}   # chrom -> contig length (string, from ##contig headers)
for line in vcfIn:
    if line[0] == '#':
        # Header line: harvest contig lengths from ##contig records.
        if line[0:8] == '##contig':
            line = line.rstrip().split(',')
            chrom = line[0].split('=')[-1]
            length = line[1].split('=')[-1]
            lengthContigs[chrom] = length
        continue
    line = line.rstrip().split('\t')
    if line[0] not in positions:
        positions[line[0]] = []
        allChroms.append(line[0])
    # NOTE(review): assumes AF is always the second ';'-separated INFO
    # field — confirm this holds for the VCFs being processed.
    alleleFreq = line[7].split(';')[1]
    alleleFreq = alleleFreq.split('=')
    if alleleFreq[0] != 'AF':
        # "raar" is Dutch for "weird" — diagnostic for unexpected INFO order.
        print "raar!" + str(alleleFreq)
    alleleFreq = alleleFreq[1]
    positions[line[0]].append([line[1], alleleFreq])

# Subplot layouts tried previously (6 and 5 rows); currently 4 per figure.
#subplots = [0, 611, 612, 613, 614, 615, 616]
#subplots = [0, 511, 512, 513, 514, 515]
subplots = [0, 411, 412, 413, 414]
loop = 0
nrFig = 0
# NOTE(review): sorts by the first character of the name only, so e.g.
# "10" sorts before "2" — confirm the intended chromosome ordering.
allChroms.sort(key=lambda x: x[0])
fig = plt.figure()
for chrom in allChroms:
    loop = loop + 1
    if loop == 5:
        # Current figure is full (4 subplots): save it and start a new one.
        nrFig = nrFig + 1
        plt.savefig(sys.argv[2] + "_" + str(nrFig) + '.png')
        fig = plt.figure()
        loop = 1
    print chrom
    posx = []
    posy = []
    for pos in positions[chrom]:
        # AF may hold several comma-separated values (multi-allelic sites);
        # plot each at the same genomic position.
        freqs = pos[1].split(',')
        lengthfreqs = len(freqs)
        for i in range(0,lengthfreqs):
            posx.append(int(pos[0]))
            posy.append(float(freqs[i]))
    ax = fig.add_subplot(subplots[loop])
    ax.scatter(posx, posy, s=2, facecolor='0.5', lw = 0)
    ax.set_ylabel('AF ' + str(chrom))
    #ax.set_xlabel('Position on ' + str(chrom))
    ax.set_ylim(-0.1,1.1)
    ax.set_xlim(0, int(lengthContigs[chrom]))
    # Shrink all axis/tick label fonts so 4 stacked panels stay readable.
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(10)

# Save the final (possibly partially filled) figure.
nrFig = nrFig + 1
plt.savefig(sys.argv[2] + "_" + str(nrFig) + '.png')
| gpl-2.0 |
blink1073/scikit-image | viewer_examples/plugins/watershed_demo.py | 35 | 1277 | import matplotlib.pyplot as plt
from skimage import data
from skimage import filters
from skimage import morphology
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import history
from skimage.viewer.plugins.labelplugin import LabelPainter
class OKCancelButtons(history.OKCancelButtons):
    """OK/Cancel buttons whose OK action refreshes the watershed overlay."""

    def update_original_image(self):
        # The stock OKCancelButtons copies the filtered image back onto the
        # original; for this demo we instead redraw the segmentation overlay
        # and then close the plugin.
        self.plugin._show_watershed()
        self.plugin.close()
class WatershedPlugin(LabelPainter):
    """Label-painting plugin that runs watershed segmentation on demand."""

    def help(self):
        """Return the multi-line help text shown to the user."""
        helpstr = ("Watershed plugin",
                   "----------------",
                   "Use mouse to paint each region with a different label.",
                   "Press OK to display segmented image.")
        return '\n'.join(helpstr)

    def _show_watershed(self):
        """Segment the image from the painted seed labels and overlay it."""
        viewer = self.image_viewer
        # BUG FIX: this module imports `skimage.filters` as `filters`; the
        # original called `filter.sobel`, which resolves to the `filter`
        # builtin and raises AttributeError at runtime.
        edge_image = filters.sobel(viewer.image)
        labels = morphology.watershed(edge_image, self.paint_tool.overlay)
        viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)
        viewer.redraw()
# Demo entry point: interactively segment the sample "coins" image.
image = data.coins()
plugin = WatershedPlugin()
plugin += OKCancelButtons()  # attach the OK/Cancel controls to the plugin
viewer = ImageViewer(image)
viewer += plugin             # register the plugin with the viewer
viewer.show()                # display the viewer GUI
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/multioutput.py | 7 | 30307 | """
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <betatim@gmail.com>
# Author: Hugo Bowne-Anderson <hugobowne@gmail.com>
# Author: Chris Rivera <chris.richard.rivera@gmail.com>
# Author: Michael Williamson
# Author: James Ashton Nichols <james.ashton.nichols@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from joblib import Parallel
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator, clone, MetaEstimatorMixin
from .base import RegressorMixin, ClassifierMixin, is_classifier
from .model_selection import cross_val_predict
from .utils import check_array, check_X_y, check_random_state
from .utils.metaestimators import if_delegate_has_method
from .utils.validation import (check_is_fitted, has_fit_parameter,
_check_fit_params, _deprecate_positional_args)
from .utils.multiclass import check_classification_targets
from .utils.fixes import delayed
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier",
"ClassifierChain", "RegressorChain"]
def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params):
    """Fit a clone of ``estimator`` on one output column and return it.

    The input estimator is never mutated; a fresh clone is fitted so the
    caller can reuse the same prototype for every output.
    """
    fitted = clone(estimator)
    if sample_weight is None:
        fitted.fit(X, y, **fit_params)
    else:
        fitted.fit(X, y, sample_weight=sample_weight, **fit_params)
    return fitted
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
class _MultiOutputEstimator(MetaEstimatorMixin,
                            BaseEstimator,
                            metaclass=ABCMeta):
    # Shared base for multi-output regressors/classifiers: one independent
    # clone of ``estimator`` is fitted per output column, in parallel.
    @abstractmethod
    @_deprecate_positional_args
    def __init__(self, estimator, *, n_jobs=None):
        self.estimator = estimator
        self.n_jobs = n_jobs
    @if_delegate_has_method('estimator')
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incrementally fit the model to data.
        Fit a separate model for each output variable.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets.
        classes : list of ndarray of shape (n_outputs,)
            Each array is unique classes for one output in str/int
            Can be obtained by via
            ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
            target matrix of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.
        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y,
                         force_all_finite=False,
                         multi_output=True,
                         accept_sparse=True)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi-output regression but has only one.")
        if (sample_weight is not None and
                not has_fit_parameter(self.estimator, 'sample_weight')):
            raise ValueError("Underlying estimator does not support"
                             " sample weights.")
        # First call clones ``self.estimator`` per output; subsequent calls
        # keep updating the already-fitted per-output estimators.
        first_time = not hasattr(self, 'estimators_')
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_partial_fit_estimator)(
                self.estimators_[i] if not first_time else self.estimator,
                X, y[:, i],
                classes[i] if classes is not None else None,
                sample_weight, first_time) for i in range(y.shape[1]))
        return self
    def fit(self, X, y, sample_weight=None, **fit_params):
        """ Fit the model to data.
        Fit a separate model for each output variable.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets. An indicator matrix turns on multilabel
            estimation.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.
        **fit_params : dict of string -> object
            Parameters passed to the ``estimator.fit`` method of each step.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        if not hasattr(self.estimator, "fit"):
            raise ValueError("The base estimator should implement"
                             " a fit method")
        X, y = self._validate_data(X, y,
                                   force_all_finite=False,
                                   multi_output=True, accept_sparse=True)
        if is_classifier(self):
            check_classification_targets(y)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi-output regression but has only one.")
        if (sample_weight is not None and
                not has_fit_parameter(self.estimator, 'sample_weight')):
            raise ValueError("Underlying estimator does not support"
                             " sample weights.")
        fit_params_validated = _check_fit_params(X, fit_params)
        # One independent clone fitted per output column, in parallel.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_estimator)(
                self.estimator, X, y[:, i], sample_weight,
                **fit_params_validated)
            for i in range(y.shape[1]))
        return self
    def predict(self, X):
        """Predict multi-output variable using a model
        trained for each target variable.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.
        Returns
        -------
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets predicted across multiple predictors.
            Note: Separate models are generated for each predictor.
        """
        check_is_fitted(self)
        if not hasattr(self.estimator, "predict"):
            raise ValueError("The base estimator should implement"
                             " a predict method")
        X = check_array(X, force_all_finite=False, accept_sparse=True)
        y = Parallel(n_jobs=self.n_jobs)(
            delayed(e.predict)(X)
            for e in self.estimators_)
        # Stack the per-output predictions as columns: (n_samples, n_outputs).
        return np.asarray(y).T
    def _more_tags(self):
        return {'multioutput_only': True}
class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
    """Multi target regression

    This strategy consists of fitting one regressor per target. This is a
    simple strategy for extending regressors that do not natively support
    multi-target regression.

    .. versionadded:: 0.18

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing :term:`fit` and :term:`predict`.

    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel.
        :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
        by the passed estimator) will be parallelized for each target.
        When individual estimators are fast to train or predict,
        using ``n_jobs > 1`` can result in slower performance due
        to the parallelism overhead.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all available processes / threads.
        See :term:`Glossary <n_jobs>` for more details.

        .. versionchanged:: 0.20
            `n_jobs` default changed from 1 to None

    Attributes
    ----------
    estimators_ : list of ``n_output`` estimators
        Estimators used for predictions.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_linnerud
    >>> from sklearn.multioutput import MultiOutputRegressor
    >>> from sklearn.linear_model import Ridge
    >>> X, y = load_linnerud(return_X_y=True)
    >>> clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
    >>> clf.predict(X[[0]])
    array([[176..., 35..., 57...]])
    """
    @_deprecate_positional_args
    def __init__(self, estimator, *, n_jobs=None):
        super().__init__(estimator, n_jobs=n_jobs)

    @if_delegate_has_method('estimator')
    def partial_fit(self, X, y, sample_weight=None):
        """Incrementally fit the model to data.

        Fit a separate model for each output variable.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        Returns
        -------
        self : object
        """
        # Bug fix: the docstring promises ``self`` but the previous code
        # dropped the parent's return value and implicitly returned None,
        # breaking chained calls such as ``est.partial_fit(...).predict(...)``.
        return super().partial_fit(X, y, sample_weight=sample_weight)
class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator):
    """Multi target classification
    This strategy consists of fitting one classifier per target. This is a
    simple strategy for extending classifiers that do not natively support
    multi-target classification
    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing :term:`fit`, :term:`score` and
        :term:`predict_proba`.
    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel.
        :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
        by the passed estimator) will be parallelized for each target.
        When individual estimators are fast to train or predict,
        using ``n_jobs > 1`` can result in slower performance due
        to the parallelism overhead.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all available processes / threads.
        See :term:`Glossary <n_jobs>` for more details.
        .. versionchanged:: 0.20
            `n_jobs` default changed from 1 to None
    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        Class labels.
    estimators_ : list of ``n_output`` estimators
        Estimators used for predictions.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_multilabel_classification
    >>> from sklearn.multioutput import MultiOutputClassifier
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> X, y = make_multilabel_classification(n_classes=3, random_state=0)
    >>> clf = MultiOutputClassifier(KNeighborsClassifier()).fit(X, y)
    >>> clf.predict(X[-2:])
    array([[1, 1, 0], [1, 1, 1]])
    """
    @_deprecate_positional_args
    def __init__(self, estimator, *, n_jobs=None):
        super().__init__(estimator, n_jobs=n_jobs)
    def fit(self, X, Y, sample_weight=None, **fit_params):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying classifier supports sample
            weights.
        **fit_params : dict of string -> object
            Parameters passed to the ``estimator.fit`` method of each step.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        super().fit(X, Y, sample_weight, **fit_params)
        # Expose one ``classes_`` array per output, taken from each fitted
        # per-output estimator.
        self.classes_ = [estimator.classes_ for estimator in self.estimators_]
        return self
    @property
    def predict_proba(self):
        """Probability estimates.
        Returns prediction probabilities for each class of each output.
        This method will raise a ``ValueError`` if any of the
        estimators do not have ``predict_proba``.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data
        Returns
        -------
        p : array of shape (n_samples, n_classes), or a list of n_outputs \
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
            .. versionchanged:: 0.19
                This function now returns a list of arrays where the length of
                the list is ``n_outputs``, and each array is (``n_samples``,
                ``n_classes``) for that particular output.
        """
        # Implemented as a property returning a bound callable so that
        # ``hasattr(clf, 'predict_proba')`` fails with AttributeError when a
        # sub-estimator lacks predict_proba.
        check_is_fitted(self)
        if not all([hasattr(estimator, "predict_proba")
                    for estimator in self.estimators_]):
            raise AttributeError("The base estimator should "
                                 "implement predict_proba method")
        return self._predict_proba
    def _predict_proba(self, X):
        # One probability array per output, in the order of ``estimators_``.
        results = [estimator.predict_proba(X) for estimator in
                   self.estimators_]
        return results
    def score(self, X, y):
        """Returns the mean accuracy on the given test data and labels.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples
        y : array-like of shape (n_samples, n_outputs)
            True values for X
        Returns
        -------
        scores : float
            accuracy_score of self.predict(X) versus y
        """
        check_is_fitted(self)
        n_outputs_ = len(self.estimators_)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi target classification but has only one")
        if y.shape[1] != n_outputs_:
            raise ValueError("The number of outputs of Y for fit {0} and"
                             " score {1} should be same".
                             format(n_outputs_, y.shape[1]))
        y_pred = self.predict(X)
        # Subset accuracy: a sample counts as correct only when every
        # output label matches.
        return np.mean(np.all(y == y_pred, axis=1))
    def _more_tags(self):
        # FIXME
        return {'_skip_test': True}
class _BaseChain(BaseEstimator, metaclass=ABCMeta):
    # Shared machinery for ClassifierChain / RegressorChain: estimator k is
    # trained on X augmented with the outputs of estimators 0..k-1.
    @_deprecate_positional_args
    def __init__(self, base_estimator, *, order=None, cv=None,
                 random_state=None):
        self.base_estimator = base_estimator
        self.order = order
        self.cv = cv
        self.random_state = random_state
    @abstractmethod
    def fit(self, X, Y, **fit_params):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        **fit_params : dict of string -> object
            Parameters passed to the `fit` method of each step.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True)
        random_state = check_random_state(self.random_state)
        check_array(X, accept_sparse=True)
        # Resolve the chain order: None -> natural column order,
        # 'random' -> random permutation, otherwise an explicit permutation.
        self.order_ = self.order
        if isinstance(self.order_, tuple):
            self.order_ = np.array(self.order_)
        if self.order_ is None:
            self.order_ = np.array(range(Y.shape[1]))
        elif isinstance(self.order_, str):
            if self.order_ == 'random':
                self.order_ = random_state.permutation(Y.shape[1])
        elif sorted(self.order_) != list(range(Y.shape[1])):
            raise ValueError("invalid order")
        self.estimators_ = [clone(self.base_estimator)
                            for _ in range(Y.shape[1])]
        # Build the augmented matrix [X | previous outputs].  Without cv the
        # true labels (in chain order) are used; with cv the label columns
        # start at zero and are filled with cross-validated predictions below.
        if self.cv is None:
            Y_pred_chain = Y[:, self.order_]
            if sp.issparse(X):
                X_aug = sp.hstack((X, Y_pred_chain), format='lil')
                X_aug = X_aug.tocsr()
            else:
                X_aug = np.hstack((X, Y_pred_chain))
        elif sp.issparse(X):
            Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
            X_aug = sp.hstack((X, Y_pred_chain), format='lil')
        else:
            Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
            X_aug = np.hstack((X, Y_pred_chain))
        del Y_pred_chain
        for chain_idx, estimator in enumerate(self.estimators_):
            y = Y[:, self.order_[chain_idx]]
            # Estimator k sees the original features plus the first k
            # label columns of the augmented matrix.
            estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y,
                          **fit_params)
            if self.cv is not None and chain_idx < len(self.estimators_) - 1:
                col_idx = X.shape[1] + chain_idx
                # Fill this label column with out-of-fold predictions so
                # later links do not train on leaked true labels.
                cv_result = cross_val_predict(
                    self.base_estimator, X_aug[:, :col_idx],
                    y=y, cv=self.cv)
                if sp.issparse(X_aug):
                    X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
                else:
                    X_aug[:, col_idx] = cv_result
        return self
    def predict(self, X):
        """Predict on the data matrix X using the ClassifierChain model.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Returns
        -------
        Y_pred : array-like of shape (n_samples, n_classes)
            The predicted values.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse=True)
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            # Each link predicts from X plus the predictions of all
            # earlier links in the chain.
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                if chain_idx == 0:
                    X_aug = X
                else:
                    X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        # Undo the chain permutation so columns come back in Y's order.
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_pred = Y_pred_chain[:, inv_order]
        return Y_pred
class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
    """A multi-label model that arranges binary classifiers into a chain.
    Each model makes a prediction in the order specified by the chain using
    all of the available features provided to the model plus the predictions
    of models that are earlier in the chain.
    Read more in the :ref:`User Guide <classifierchain>`.
    .. versionadded:: 0.19
    Parameters
    ----------
    base_estimator : estimator
        The base estimator from which the classifier chain is built.
    order : array-like of shape (n_outputs,) or 'random', default=None
        If None, the order will be determined by the order of columns in
        the label matrix Y.::
            order = [0, 1, 2, ..., Y.shape[1] - 1]
        The order of the chain can be explicitly set by providing a list of
        integers. For example, for a chain of length 5.::
            order = [1, 3, 2, 4, 0]
        means that the first model in the chain will make predictions for
        column 1 in the Y matrix, the second model will make predictions
        for column 3, etc.
        If order is 'random' a random ordering will be used.
    cv : int, cross-validation generator or an iterable, default=None
        Determines whether to use cross validated predictions or true
        labels for the results of previous estimators in the chain.
        Possible inputs for cv are:
        - None, to use true labels when fitting,
        - integer, to specify the number of folds in a (Stratified)KFold,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
    random_state : int, RandomState instance or None, optional (default=None)
        If ``order='random'``, determines random number generation for the
        chain order.
        In addition, it controls the random seed given at each `base_estimator`
        at each chaining iteration. Thus, it is only used when `base_estimator`
        exposes a `random_state`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    classes_ : list
        A list of arrays of length ``len(estimators_)`` containing the
        class labels for each estimator in the chain.
    estimators_ : list
        A list of clones of base_estimator.
    order_ : list
        The order of labels in the classifier chain.
    Examples
    --------
    >>> from sklearn.datasets import make_multilabel_classification
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.multioutput import ClassifierChain
    >>> X, Y = make_multilabel_classification(
    ...    n_samples=12, n_classes=3, random_state=0
    ... )
    >>> X_train, X_test, Y_train, Y_test = train_test_split(
    ...    X, Y, random_state=0
    ... )
    >>> base_lr = LogisticRegression(solver='lbfgs', random_state=0)
    >>> chain = ClassifierChain(base_lr, order='random', random_state=0)
    >>> chain.fit(X_train, Y_train).predict(X_test)
    array([[1., 1., 0.],
           [1., 0., 0.],
           [0., 1., 0.]])
    >>> chain.predict_proba(X_test)
    array([[0.8387..., 0.9431..., 0.4576...],
           [0.8878..., 0.3684..., 0.2640...],
           [0.0321..., 0.9935..., 0.0625...]])
    See Also
    --------
    RegressorChain : Equivalent for regression.
    MultioutputClassifier : Classifies each output independently rather than
        chaining.
    References
    ----------
    Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier
    Chains for Multi-label Classification", 2009.
    """
    def fit(self, X, Y):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        Returns
        -------
        self : object
        """
        super().fit(X, Y)
        # Collect per-link class labels (in chain order).
        self.classes_ = [estimator.classes_
                         for chain_idx, estimator
                         in enumerate(self.estimators_)]
        return self
    @if_delegate_has_method('base_estimator')
    def predict_proba(self, X):
        """Predict probability estimates.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Returns
        -------
        Y_prob : array-like of shape (n_samples, n_classes)
        """
        X = check_array(X, accept_sparse=True)
        Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            # Later links consume the hard predictions of earlier links,
            # while probabilities are accumulated separately.
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        # Undo the chain permutation so columns match Y's original order.
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_prob = Y_prob_chain[:, inv_order]
        return Y_prob
    @if_delegate_has_method('base_estimator')
    def decision_function(self, X):
        """Evaluate the decision_function of the models in the chain.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        Y_decision : array-like of shape (n_samples, n_classes)
            Returns the decision function of the sample for each model
            in the chain.
        """
        Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            # Same augmentation scheme as predict_proba: hard predictions
            # of earlier links feed later links.
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_decision = Y_decision_chain[:, inv_order]
        return Y_decision
    def _more_tags(self):
        return {'_skip_test': True,
                'multioutput_only': True}
class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain):
    """A multi-label model that arranges regressions into a chain.
    Each model makes a prediction in the order specified by the chain using
    all of the available features provided to the model plus the predictions
    of models that are earlier in the chain.
    Read more in the :ref:`User Guide <regressorchain>`.
    .. versionadded:: 0.20
    Parameters
    ----------
    base_estimator : estimator
        The base estimator from which the classifier chain is built.
    order : array-like of shape (n_outputs,) or 'random', default=None
        If None, the order will be determined by the order of columns in
        the label matrix Y.::
            order = [0, 1, 2, ..., Y.shape[1] - 1]
        The order of the chain can be explicitly set by providing a list of
        integers. For example, for a chain of length 5.::
            order = [1, 3, 2, 4, 0]
        means that the first model in the chain will make predictions for
        column 1 in the Y matrix, the second model will make predictions
        for column 3, etc.
        If order is 'random' a random ordering will be used.
    cv : int, cross-validation generator or an iterable, default=None
        Determines whether to use cross validated predictions or true
        labels for the results of previous estimators in the chain.
        Possible inputs for cv are:
        - None, to use true labels when fitting,
        - integer, to specify the number of folds in a (Stratified)KFold,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
    random_state : int, RandomState instance or None, optional (default=None)
        If ``order='random'``, determines random number generation for the
        chain order.
        In addition, it controls the random seed given at each `base_estimator`
        at each chaining iteration. Thus, it is only used when `base_estimator`
        exposes a `random_state`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    estimators_ : list
        A list of clones of base_estimator.
    order_ : list
        The order of labels in the classifier chain.
    Examples
    --------
    >>> from sklearn.multioutput import RegressorChain
    >>> from sklearn.linear_model import LogisticRegression
    >>> logreg = LogisticRegression(solver='lbfgs',multi_class='multinomial')
    >>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]]
    >>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y)
    >>> chain.predict(X)
    array([[0., 2.],
           [1., 1.],
           [2., 0.]])
    See Also
    --------
    ClassifierChain : Equivalent for classification.
    MultioutputRegressor : Learns each output independently rather than
        chaining.
    """
    def fit(self, X, Y, **fit_params):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        **fit_params : dict of string -> object
            Parameters passed to the `fit` method at each step
            of the regressor chain.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        # All chain-building work happens in _BaseChain.fit; this override
        # only exists to document the regression-specific signature.
        super().fit(X, Y, **fit_params)
        return self
    def _more_tags(self):
        return {'multioutput_only': True}
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/cluster/k_means_.py | 6 | 60864 | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++
    Parameters
    -----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters : integer
        The number of seeds to choose
    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.
    random_state : numpy.RandomState
        The generator used to initialize the centers.
    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.
    Returns
    -------
    centers : array, shape (n_clusters, n_features)
        The chosen initial cluster centers.
    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007
    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features), dtype=X.dtype)
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))
    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]
    # Initialize list of closest distances and calculate current potential
    # ("potential" = sum of squared distances to the nearest chosen center).
    closest_dist_sq = euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()
    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
                                        rand_vals)
        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()
            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            algorithm="auto", return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    verbose : boolean, optional
        Verbosity mode.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter : int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    # Validate hyper-parameters before touching the data.
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    X = as_float_array(X, copy=copy_x)
    # Convert the relative tolerance into an absolute, scale-independent one.
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # Validate init array
    if hasattr(init, '__array__'):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        # With explicit initial centers each run would be identical, so
        # multiple inits are pointless.
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X):
        X_mean = X.mean(axis=0)
        # The copy was already done above
        X -= X_mean
        if hasattr(init, '__array__'):
            init -= X_mean
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_clusters == 1:
        # elkan doesn't make sense for a single cluster, full will produce
        # the right result.
        algorithm = "full"
    if algorithm == "auto":
        algorithm = "full" if sp.issparse(X) else 'elkan'
    if algorithm == "full":
        kmeans_single = _kmeans_single_lloyd
    elif algorithm == "elkan":
        kmeans_single = _kmeans_single_elkan
    else:
        raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
                         " %s" % str(algorithm))
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
                                   verbose=verbose, tol=tol,
                                   precompute_distances=precompute_distances,
                                   x_squared_norms=x_squared_norms,
                                   # Change seed to ensure variety
                                   random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    # Undo the mean-centering: restore the caller's data in-place when it was
    # modified (copy_x=False) and shift the centers back to the original frame.
    if not sp.issparse(X):
        if not copy_x:
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """Run a single k-means initialization with Elkan's accelerated algorithm.

    Only dense input is supported. Returns ``(labels, inertia, centers,
    n_iter)`` with the same meaning as the Lloyd variant.
    """
    if sp.issparse(X):
        raise ValueError("algorithm='elkan' not supported for sparse input X")
    X = check_array(X, order="C")
    random_state = check_random_state(random_state)
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    # Pick the starting centroids and make them C-contiguous for the
    # cython implementation.
    start_centers = _init_centroids(
        X, n_clusters, init, random_state=random_state,
        x_squared_norms=x_squared_norms)
    start_centers = np.ascontiguousarray(start_centers)
    if verbose:
        print('Initialization complete')
    centers, labels, n_iter = k_means_elkan(
        X, n_clusters, start_centers, tol=tol, max_iter=max_iter,
        verbose=verbose)
    # Inertia: total squared distance of every sample to its assigned center.
    inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
    return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X : array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode
    x_squared_norms : array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        # Keep the previous centers to measure how far they move.
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best (lowest-inertia) assignment seen so far.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Total squared movement of all centers in this iteration.
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign samples to their nearest centers using full distance computation.

    The pre-allocated ``distances`` array is overwritten in place when its
    length matches the number of samples.

    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # The nearest-neighbor search is batched internally to avoid a memory
    # blowup when both n_samples and n_clusters are large.
    # TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
    labels, mindist = pairwise_distances_argmin_min(
        X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
    # The cython k-means code expects int32 labels.
    labels = labels.astype(np.int32)
    if distances.shape[0] == n_samples:
        # Fill the caller-provided buffer in place.
        distances[:] = mindist
    return labels, mindist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Assign each sample to its closest center and compute the inertia.
    Distances are written in-place into ``distances`` when provided.

    Parameters
    ----------
    X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms : array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers : float array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances : float array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels : int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    if distances is None:
        distances = np.zeros(shape=(0,), dtype=X.dtype)
    is_sparse = sp.issparse(X)
    if not is_sparse and precompute_distances:
        # Dense fast path based on a full (internally batched) distance
        # matrix; returns directly.
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    # Initialise every label to -1 so any unassigned sample is easy to spot.
    labels = -np.ones(X.shape[0], np.int32)
    if is_sparse:
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
    else:
        inertia = _k_means._assign_labels_array(
            X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial cluster centers.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
    k : int
        number of centroids
    init : {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    x_squared_norms : array, shape (n_samples,), optional
        Squared euclidean norm of each data point; pass it to avoid a
        recomputation here. Default: None
    init_size : int, optional
        Number of samples to randomly draw for speeding up the
        initialization (sometimes at the expense of accuracy). Must be
        larger than k.

    Returns
    -------
    centers : array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    if init_size is not None and init_size < n_samples:
        # Initialize from a random subset of the data for speed.
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        subset = random_state.randint(0, n_samples, init_size)
        X = X[subset]
        x_squared_norms = x_squared_norms[subset]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    init_is_name = isinstance(init, string_types)
    if init_is_name and init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init_is_name and init == 'random':
        chosen = random_state.permutation(n_samples)[:k]
        centers = X[chosen]
    elif hasattr(init, '__array__'):
        # The centers must share X's dtype: the cython fused-type code
        # requires it.
        centers = np.array(init, dtype=X.dtype)
    elif callable(init):
        centers = np.asarray(init(X, k, random_state=random_state),
                             dtype=X.dtype)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    verbose : int, default 0
        Verbosity mode.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ : array, shape (n_samples,)
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    Examples
    --------
    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
    >>> kmeans.labels_
    array([0, 0, 0, 1, 1, 1], dtype=int32)
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([0, 1], dtype=int32)
    >>> kmeans.cluster_centers_
    array([[ 1.,  2.],
           [ 4.,  2.]])
    See also
    --------
    MiniBatchKMeans
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
    Notes
    -----
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=1, algorithm='auto'):
        # Only store the hyper-parameters here; validation happens in fit.
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.algorithm = algorithm
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        """Validate X and check it matches the fitted number of features."""
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        y : Ignored
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # Delegate to the module-level k_means and keep all of its outputs.
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs, algorithm=self.algorithm,
                return_n_iter=True)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        y : Ignored
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        y : Ignored
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers.  Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        y : Ignored
        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negate the inertia so that higher scores mean better fits.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            # Keep the highest-count centers; reassign at most half the batch.
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = random_state.choice(X.shape[0], replace=False,
                                              size=n_reassigns)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X, new_centers.astype(np.intp),
                                np.where(to_reassign)[0].astype(np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        # NOTE(review): assumes at least one center is NOT reassigned,
        # otherwise counts[~to_reassign] is empty -- confirm with callers.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            # Note: numpy >= 1.10 does not support '/=' for the following
            # expression for a mixture of int and float (see numpy issue #6464)
            centers[center_idx] = centers[center_idx] / counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering
    Read more in the :ref:`User Guide <mini_batch_kmeans>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.
    batch_size : int, optional, default: 100
        Size of the mini batches.
    verbose : boolean, optional
        Verbosity mode.
    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.
    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized mean of the center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).
    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.
    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.
    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.
    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point (if compute_labels is set to True).
    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.
    See also
    --------
    KMeans
        The classic implementation of the clustering method based on the
        Lloyd's algorithm. It consumes the whole set of input data at each
        iteration.
    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """
    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Parameters shared with the batch variant are handled by KMeans;
        # only the mini-batch-specific knobs are stored here.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio
    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        y : Ignored
        Returns
        -------
        self : MiniBatchKMeans
            The fitted estimator.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C',
                        dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")
        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
        x_squared_norms = row_norms(X, squared=True)
        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, dtype=X.dtype)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, dtype=X.dtype)
        # Reusable distance buffer passed to every _mini_batch_step call.
        distances = np.zeros(self.batch_size, dtype=X.dtype)
        # One "epoch" over the data is n_batches minibatches; the total
        # iteration budget is max_iter epochs.
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)
        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size
        # Fixed random subset used as a common validation set to compare the
        # independent initializations against each other.
        validation_indices = random_state.randint(0, n_samples, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]
        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)
            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.
            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)
            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)
            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia
        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}
        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.randint(
                0, n_samples, self.batch_size)
            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)
            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break
        self.n_iter_ = iteration_idx + 1
        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
        return self
    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.
        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)
    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        y : Ignored
        Returns
        -------
        self : MiniBatchKMeans
            The (partially) fitted estimator.
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
        if n_samples == 0:
            return self
        x_squared_norms = row_norms(X, squared=True)
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)
            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=X.dtype)
        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, dtype=X.dtype), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)
        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_0/fullgrid/peaks_reader.py | 32 | 5021 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ---------------------------------------------------
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
quole/gensim | gensim/test/test_sklearn_integration.py | 2 | 7081 | import six
import unittest
import numpy
import os
import codecs
import pickle
from scipy import sparse
try:
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from sklearn import linear_model
except ImportError:
raise unittest.SkipTest("Test requires scikit-learn to be installed, which is not available")
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklearnWrapperLdaModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import SklearnWrapperLsiModel
from gensim.corpora import Dictionary
from gensim import matutils
module_path = os.path.dirname(__file__)  # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# Shared toy fixture: nine short "documents" over a small graph/computer
# vocabulary, converted to a gensim Dictionary and bag-of-words corpus used
# by both test classes below.
texts = [
    ['complier', 'system', 'computer'],
    ['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
    ['graph', 'flow', 'network', 'graph'],
    ['loading', 'computer', 'system'],
    ['user', 'server', 'system'],
    ['tree', 'hamiltonian'],
    ['graph', 'trees'],
    ['computer', 'kernel', 'malfunction', 'computer'],
    ['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
class TestSklearnLDAWrapper(unittest.TestCase):
    """Tests for the scikit-learn wrapper around gensim's LdaModel."""

    def setUp(self):
        # NOTE(review): numpy.random.seed(0) returns None, so random_state is
        # effectively None here; the call only seeds numpy's global RNG as a
        # side effect. Left unchanged to preserve the expected values below.
        self.model = SklearnWrapperLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
        self.model.fit(corpus)

    def testPrintTopic(self):
        """print_topics yields (int topic id, str description) pairs."""
        topic = self.model.print_topics(2)
        for k, v in topic:
            self.assertTrue(isinstance(v, six.string_types))
            self.assertTrue(isinstance(k, int))

    def testTransform(self):
        """transform returns one row per document, one column per topic."""
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix = self.model.transform(bow)
        # Fix: these were ``assertTrue(x, msg)`` calls, which pass for any
        # truthy first argument; the intent was an equality check.
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
        bow = []
        for i in texts_new:
            bow.append(self.model.id2word.doc2bow(i))
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testGetTopicDist(self):
        """get_topic_dist returns per-document, per-word and phi distributions."""
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        doc_topics, word_topics, phi_values = self.model.get_topic_dist(bow, per_word_topics=True)
        for k, v in word_topics:
            self.assertTrue(isinstance(v, list))
            self.assertTrue(isinstance(k, int))
        for k, v in doc_topics:
            self.assertTrue(isinstance(v, float))
            self.assertTrue(isinstance(k, int))
        for k, v in phi_values:
            self.assertTrue(isinstance(v, list))
            self.assertTrue(isinstance(k, int))

    def testPartialFit(self):
        """Repeated partial_fit converges to the expected topic mixture."""
        for i in range(10):
            self.model.partial_fit(X=corpus)  # fit against the model again
        doc = list(corpus)[0]  # transform only the first document
        transformed = self.model[doc]
        transformed_approx = matutils.sparse2full(transformed, 2)  # better approximation
        expected = [0.13, 0.87]
        passed = numpy.allclose(sorted(transformed_approx), sorted(expected), atol=1e-1)
        self.assertTrue(passed)

    def testCSRMatrixConversion(self):
        """The wrapper accepts scipy CSR matrices as training input."""
        arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
        sarr = sparse.csr_matrix(arr)
        newmodel = SklearnWrapperLdaModel(num_topics=2, passes=100)
        newmodel.fit(sarr)
        topic = newmodel.print_topics()
        for k, v in topic:
            self.assertTrue(isinstance(v, six.string_types))
            self.assertTrue(isinstance(k, int))

    def testPipeline(self):
        """The wrapper composes with sklearn Pipeline as a feature extractor."""
        model = SklearnWrapperLdaModel(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
            uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
            cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary(map(lambda x: x.split(), data.data))
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        numpy.random.mtrand.RandomState(1)  # set seed for getting same result
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline((('features', model,), ('classifier', clf)))
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreater(score, 0.40)
class TestSklearnLSIWrapper(unittest.TestCase):
    """Tests for the scikit-learn wrapper around gensim's LsiModel."""

    def setUp(self):
        self.model = SklearnWrapperLsiModel(id2word=dictionary, num_topics=2)
        self.model.fit(corpus)

    def testModelSanity(self):
        """print_topics yields (int topic id, str description) pairs."""
        topic = self.model.print_topics(2)
        for k, v in topic:
            self.assertTrue(isinstance(v, six.string_types))
            self.assertTrue(isinstance(k, int))

    def testTransform(self):
        """transform returns one row per document, one column per topic."""
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix = self.model.transform(bow)
        # Fix: these were ``assertTrue(x, msg)`` calls, which pass for any
        # truthy first argument; the intent was an equality check.
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
        bow = []
        for i in texts_new:
            bow.append(self.model.id2word.doc2bow(i))
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testPartialFit(self):
        """Repeated partial_fit converges to the expected projection."""
        for i in range(10):
            self.model.partial_fit(X=corpus)  # fit against the model again
        doc = list(corpus)[0]  # transform only the first document
        transformed = self.model[doc]
        transformed_approx = matutils.sparse2full(transformed, 2)  # better approximation
        expected = [1.39, 0.0]
        passed = numpy.allclose(sorted(transformed_approx), sorted(expected), atol=1e-1)
        self.assertTrue(passed)

    def testPipeline(self):
        """The wrapper composes with sklearn Pipeline as a feature extractor."""
        model = SklearnWrapperLsiModel(num_topics=2)
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
            uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
            cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary(map(lambda x: x.split(), data.data))
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline((('features', model,), ('classifier', clf)))
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreater(score, 0.50)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| lgpl-2.1 |
wllmtrng/ggplot | ggplot/scales/scale_facet.py | 13 | 10175 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
# TODO: This is fairly repetiive and can definitely be
# condensed into a lot less code, but it's working for now
import numpy as np
import matplotlib.pyplot as plt
from .utils import calc_axis_breaks_and_limits
import sys
def scale_facet_wrap(rows, cols, positions, scaletype):
    """Set the scales on each subplot for wrapped faceting.

    Operates on the current matplotlib figure: a first pass over the
    subplots collects the x/y data extents, and a second pass applies
    shared or per-facet limits and tick labels depending on `scaletype`.

    Parameters
    ----------
    rows : int
        number of rows in the faceted plot
    cols : int
        number of columns in the faceted plot
    positions : list of int
        zero-indexed list of faceted plot positions
    scaletype : str or None
        string indicating the type of scaling to apply to the rows and columns
        - None : All plots get the same scale
        - 'free_x' : each plot is free to determine its own x-scale, all plots have the same y-scale
        - 'free_y' : each plot is free to determine its own y-scale, all plots have the same x-scale
        - 'free' : plots are free to determine their own x- and y-scales
    """
    # Extents are keyed by (column, row); when a scale is shared, everything
    # accumulates under the single key (0, 0).
    x_extents, y_extents = {}, {}
    # Calculate the extents for the plots
    for pos in positions:
        # Work on the subplot at the current position (adding 1 to pos because
        # matplotlib 1-indexes their subplots)
        plt.subplot(rows, cols, pos + 1)
        # Update the x extents for each column
        column, row = 0, 0
        if scaletype in ["free", "free_x"]:
            # If the x scale is free, all plots get their own x scale
            column = pos % cols
            row = int(pos / cols)
        limits = plt.xlim()
        # Get the current bounds for this column. Default lower limit is
        # infinity (because all values < infinity) and the default upper limit
        # is -infinity (because all values > -infinity).
        lower, upper = x_extents.get((column, row), (float("inf"), float("-inf")))
        lower = min(limits[0], lower)
        upper = max(limits[1], upper)
        x_extents[(column, row)] = (lower, upper)
        column, row = 0, 0
        if scaletype in ["free", "free_y"]:
            # If the y scale is free, all plots get their own y scale
            column = pos % cols
            row = int(pos / cols)
        limits = plt.ylim()
        # Get the current bounds for this column. Default lower limit is
        # infinity (because all values < infinity) and the default upper limit
        # is -infinity (because all values > -infinity).
        lower, upper = y_extents.get((column, row), (float("inf"), float("-inf")))
        lower = min(limits[0], lower)
        upper = max(limits[1], upper)
        y_extents[(column, row)] = (lower, upper)
    # Second pass: apply limits and tick labels to each subplot.
    for pos in positions:
        plt.subplot(rows, cols, pos + 1)
        row = int(pos / cols)
        column = pos % cols
        # Find the extents for this position. Default to the extents at
        # position column 0, row 0, in case all plots use the same scale
        xmin, xmax = x_extents[(0, 0)]
        ymin, ymax = y_extents[(0, 0)]
        if scaletype in ["free", "free_x"]:
            # If the x scale is free, look up the extents for this column and row
            xmin, xmax = x_extents[(column, row)]
        if scaletype in ["free", "free_y"]:
            # If the y scale is free, look up the extents for this column and row
            ymin, ymax = y_extents[(column, row)]
        # 4 requested breaks per axis; values rounded for tick labels.
        x_scale, x_min, x_max = calc_axis_breaks_and_limits(xmin, xmax, 4)
        x_scale = np.round(x_scale, 2)
        # Only apply x labels to plots if each plot has its own scale or the
        # plot is in the bottom row of each column.
        x_labs = []
        if scaletype in ["free", "free_x"] or pos in positions[-cols:]:
            x_labs = x_scale
        plt.xticks(x_scale, x_labs)
        plt.xlim(x_min, x_max )
        # Set the y-axis scale and labels
        y_scale, y_min, y_max = calc_axis_breaks_and_limits(ymin, ymax, 4)
        y_scale = np.round(y_scale, 2)
        # Only apply y labels to plots if each plot has its own scale or the
        # plot is in the left column.
        y_labs = []
        if scaletype in ["free", "free_y"] or column == 0:
            y_labs = y_scale
        plt.yticks(y_scale, y_labs)
        plt.ylim(y_min, y_max)
def scale_facet_grid(xdim, ydim, facet_pairs, scaletype):
    """Set the scales on each subplot for grid faceting.

    Operates on the current matplotlib figure. One branch per scale mode:
    shared (None), 'free_y', 'free_x', and fully free. Each branch makes a
    first pass over the `xdim` x `ydim` grid of subplots to collect data
    extents, then a second pass to apply limits and tick labels. The
    sentinel values +/-999999999 play the role of +/-infinity when
    accumulating min/max extents.

    Parameters
    ----------
    xdim : int
        number of subplot rows in the grid
    ydim : int
        number of subplot columns in the grid
    facet_pairs : list
        one entry per facet; only its length and ordering are used here
    scaletype : str or None
        None, 'free_x', 'free_y', or anything else for fully free scales
    """
    # everyone gets the same scales
    if scaletype is None:
        min_x, max_x = 999999999, -999999999
        min_y, max_y = 999999999, -999999999
        # First pass: accumulate global extents across all subplots.
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            min_x = min(min_x, min(plt.xlim()))
            max_x = max(max_x, max(plt.xlim()))
            min_y = min(min_y, min(plt.ylim()))
            max_y = max(max_y, max(plt.ylim()))
        y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
        y_scale = np.round(y_scale, 2)
        x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
        x_scale = np.round(x_scale, 2)
        # for all axis set the individual axis limits and ticks
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            y_labs = y_scale
            # Only the leftmost column keeps y tick labels.
            if pos % ydim!=1:
                y_labs = []
            plt.yticks(y_scale, y_labs)
            plt.ylim(y_min, y_max)
            x_labs = x_scale
            # Only the bottom row keeps x tick labels.
            if pos <= (len(facet_pairs) - ydim):
                x_labs = []
            plt.xticks(x_scale, x_labs)
            plt.xlim(x_min, x_max)
    elif scaletype=="free_y":
        # Shared x scale across the grid; one y scale per row bucket.
        min_x, max_x = 999999999, -999999999
        min_ys, max_ys = {}, {}
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            y_bucket = int((pos-1) / ydim)
            min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
            max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
            min_x = min(min_x, min(plt.xlim()))
            max_x = max(max_x, max(plt.xlim()))
            min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
            max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            y_bucket = int((pos-1) / ydim)
            y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
            y_scale = np.round(y_scale, 2)
            y_labs = y_scale
            if pos % ydim!=1:
                y_labs = []
            plt.yticks(y_scale, y_labs)
            plt.ylim(y_min, y_max)
            x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
            x_scale = np.round(x_scale, 2)
            x_labs = x_scale
            if pos <= (len(facet_pairs) - ydim):
                x_labs = []
            plt.xticks(x_scale, x_labs)
            plt.xlim(x_min, x_max)
    elif scaletype=="free_x":
        # Shared y scale across the grid; one x scale per bucket.
        # NOTE(review): x buckets divide by xdim while y buckets divide by
        # ydim; this asymmetry looks suspicious for non-square grids --
        # confirm against upstream ggplot behavior before changing.
        min_y, max_y = 999999999, -999999999
        min_xs, max_xs = {}, {}
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            x_bucket = int((pos-1) / xdim)
            min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
            max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
            min_y = min(min_y, min(plt.ylim()))
            max_y = max(max_y, max(plt.ylim()))
            min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
            max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            x_bucket = int((pos-1) / xdim)
            x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
            x_scale = np.round(x_scale, 2)
            x_labs = x_scale
            if pos <= ((len(facet_pairs) - ydim)):
                x_labs = []
            plt.xticks(x_scale, x_labs)
            plt.xlim(x_min, x_max)
            y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
            y_scale = np.round(y_scale, 2)
            y_labs = y_scale
            if pos % ydim!=1:
                y_labs = []
            plt.yticks(y_scale, y_labs)
            plt.ylim(y_min, y_max)
    else:
        # Fully free: independent x and y scales per bucket.
        min_xs, max_xs = {}, {}
        min_ys, max_ys = {}, {}
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            x_bucket = int((pos-1) / xdim)
            min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
            max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
            min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
            max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
            y_bucket = int((pos-1) / ydim)
            min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
            max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
            min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
            max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
        for pos, _ in enumerate(facet_pairs):
            pos += 1
            plt.subplot(xdim, ydim, pos)
            x_bucket = int((pos-1) / xdim)
            x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
            x_scale = np.round(x_scale, 2)
            x_labs = x_scale
            if pos <= ((len(facet_pairs) - ydim)):
                x_labs = []
            plt.xticks(x_scale, x_labs)
            plt.xlim(x_min, x_max)
            y_bucket = int((pos-1) / ydim)
            y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
            y_scale = np.round(y_scale, 2)
            y_labs = y_scale
            if pos % ydim!=1:
                y_labs = []
            plt.yticks(y_scale, y_labs)
            plt.ylim(y_min, y_max)
| bsd-2-clause |
eg-zhang/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
# Lay out a 3x2 grid of subplots, one per generated dataset.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
# Multi-class variant: three classes, colored by label.
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
# Classes here are concentric quantile shells of a single Gaussian.
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
alexfurtunatoifrn/parsecpy | parsecpy/dataprocess.py | 1 | 26272 | # -*- coding: utf-8 -*-
"""
Module with Classes that generates xArray DataArray
with processed data from Parsec applications execution.
"""
import os
from datetime import datetime
import json
import numpy as np
import xarray as xr
from copy import deepcopy
from collections import defaultdict
from typing import Dict, Any, List
from ._common import freq_hz
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import ticker
from matplotlib.widgets import Slider, RadioButtons
support3d = True
try:
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
support3d = False
class ParsecData:
"""
Class that store parsec run measures values
Atrributes
config - The metadata about execution informations
measures - Resume dictionary with all measures times
Methods
loadata()
savedata()
times()
speedups()
plot2D()
plot3D
"""
config = {}
measures = {}
def __init__(self, filename=None):
"""
Create a empty object or initialized of data from a file saved
with savedata method.
:param filename: File name that store measures
"""
self.config = {}
self.measures = {}
self.power = defaultdict(list)
if filename:
self.loaddata(filename)
return
def __str__(self):
"""
Default output string representation of class
:return: specific formated string
"""
if not self.config:
return 'No data'
pkg = 'Package: ' + self.config['pkg']
dt = 'Date: ' + self.config['execdate']
command = 'Command: ' + self.config['command']
return pkg + '\n' + dt + '\n' + command
    def loaddata(self, filename):
        """
        Read a file previously saved with method savedata() and initialize
        the object class dictionaries.

        :param filename: Filename with data dictionary of execution times.
        """
        if os.path.isfile(filename):
            with open(filename) as f:
                datadict = json.load(f)
            # Restore only the recognised config keys, one by one, so files
            # written by older versions (missing newer keys) still load.
            if 'config' in datadict.keys():
                if 'pkg' in datadict['config']:
                    self.config['pkg'] = datadict['config']['pkg']
                if 'execdate' in datadict['config']:
                    self.config['execdate'] = datadict['config']['execdate']
                if 'command' in datadict['config']:
                    self.config['command'] = datadict['config']['command']
                if 'input_sizes' in datadict['config']:
                    self.config['input_sizes'] = datadict['config']['input_sizes']
                if 'hostname' in datadict['config']:
                    self.config['hostname'] = datadict['config']['hostname']
                if 'thread_cpu' in datadict['config']:
                    self.config['thread_cpu'] =\
                        datadict['config']['thread_cpu']
            else:
                print('Warning: The config data not must read')
            if 'data' in datadict.keys():
                self.measures = datadict['data']
            else:
                print('Warning: No data loaded')
            # 'power' is optional: files written before power measuring was
            # added simply leave self.power untouched.
            if 'power' in datadict.keys():
                self.power = datadict['power']
        else:
            print('Error: File not found')
        return
    def savedata(self, filename=None):
        """
        Write to file the measures information stored on object class.

        :param filename: Filename to save on; defaults to
            ``<pkg>_datafile_<execdate>.dat`` in the current directory.
        :return: the name of the file written.
        """
        if filename is None:
            filedatename = self.config['execdate']
            filename = self.config['pkg'] + '_datafile_' + filedatename + '.dat'
        with open(filename, 'w') as f:
            conftxt = self.config.copy()
            # NOTE(review): this assignment is a no-op; execdate is
            # presumably already a JSON-serializable string here (compare
            # ParsecLogsData.savedata, which calls strftime) - confirm.
            conftxt['execdate'] = conftxt['execdate']
            dictsave = {'config': conftxt,
                        'data': self.measures,
                        'power': self.power}
            json.dump(dictsave, f, ensure_ascii=False)
        return filename
@staticmethod
def contentextract(txt):
"""
Extract times values from a parsec log file output and return a
dictionary of data.
:param txt: Content text from a parsec output run.
:return: dict with extracted values.
"""
roitime = ''
realtime = ''
usertime = ''
systime = ''
for l in txt.split('\n'):
if l.strip().startswith("[PARSEC] Benchmarks to run:"):
benchmark = l.strip().split(':')[1]
if benchmark.startswith("parsec"):
benchmark = benchmark.strip().split('.')[1]
else:
benchmark = benchmark.strip()
elif l.strip().startswith("[PARSEC] Unpacking benchmark input"):
inputsize = l.strip().split("'")[1]
elif l.strip().startswith("[PARSEC] No archive for input"):
inputsize = l.strip().split("'")[1]
elif l.strip().startswith("[HOOKS] Total time spent in ROI"):
roitime = l.strip().split(':')[-1]
elif l.strip().startswith("real"):
realtime = l.strip().split('\t')[-1]
elif l.strip().startswith("user"):
usertime = l.strip().split('\t')[-1]
elif l.strip().startswith("sys"):
systime = l.strip().split('\t')[-1]
if roitime:
roitime = float(roitime.strip()[:-1])
else:
roitime = None
if realtime:
realtime = 60 * float(realtime.strip().split('m')[0]) \
+ float(realtime.strip().split('m')[1][:-1])
else:
realtime = None
if usertime:
usertime = 60 * float(usertime.strip().split('m')[0]) \
+ float(usertime.strip().split('m')[1][:-1])
else:
usertime = None
if systime:
systime = 60 * float(systime.strip().split('m')[0]) \
+ float(systime.strip().split('m')[1][:-1])
else:
systime = None
return {'benchmark': benchmark, 'input': inputsize, 'roitime': roitime,
'realtime': realtime, 'usertime': usertime, 'systime': systime}
def measurebuild(self, attrs, frequency=0,
inputsize=None, numberofcores=None):
"""
Resume all tests, grouped by input sizes and number of cores,
on a dictionary.
Dictionary format
{'inputsize':{'numberofcores1':['timevalue1', ... ], ... }, ...}
:param attrs: Attributes to insert into dictionary.
:param frequency: Custom CPU frequency (Mhz) at execution moment.
:param inputsize: Input size index used on execution.
:param numberofcores: Number of cores used on executed process.
:return:
"""
if numberofcores is None:
return None
if inputsize is None:
inputsize = attrs['input']
if attrs['roitime']:
ttime = attrs['roitime']
else:
ttime = attrs['realtime']
if frequency in self.measures.keys():
if inputsize in self.measures[frequency].keys():
if numberofcores in self.measures[frequency][inputsize].keys():
self.measures[frequency][inputsize][numberofcores].\
append(ttime)
else:
self.measures[frequency][inputsize][numberofcores] = \
[ttime]
else:
self.measures[frequency][inputsize] = {numberofcores: [ttime]}
else:
self.measures[frequency] = {inputsize: {numberofcores: [ttime]}}
return
def threadcpubuild(self, source, frequency, inputsize, numberofcores):
"""
Resume all execution threads cpu numbers, grouped by frequencies,
input sizes and number of cores and repetitions, on a dictionary.
Dictionary format
{'frequency':{'inputsize':{'numberofcores1':['cpusdict1', ... ]
, ... }}}
:param source: Attributes to insert into dictionary.
:param frequency: Frequency used on execution (0 if don't fixed).
:param inputsize: Input size used on execution.
:param numberofcores: Number of cores used on executed process.
:return:
"""
threadcpu = self.config['thread_cpu']
if frequency in threadcpu.keys():
if inputsize in threadcpu[frequency].keys():
if numberofcores in threadcpu[frequency][inputsize].keys():
inputdict = threadcpu[frequency][inputsize]
inputdict[numberofcores].append(source)
else:
threadcpu[frequency][inputsize][numberofcores] = \
[source]
else:
threadcpu[frequency][inputsize] = \
{numberofcores: [source]}
else:
threadcpu[frequency] = \
{inputsize: {numberofcores: [source]}}
return
def powerbuild(self, attrs: Dict[Any, Any], keys: List[Any]):
"""
Resume all energy sensors measurements, grouped by frequency, input
sizes and number of cores on a dictionary.
Dictionary format
{"inputsize":{"numberofcores1":["timevalue1", ... ], ... }, ...}
:param attrs: Attributes to insert into dictionary.
:param keys: Custom CPU frequency (Mhz) at execution moment.
:return:
"""
keys = filter(lambda x: x is not None, keys)
keys = map(str, keys)
self.power[";".join(keys)].append(attrs)
    def threads(self):
        """
        Return a xArray DataArray with resume of all threads,
        grouped by frequency, input size and number of cores.

        Singleton frequency dimensions are squeezed out of the result.

        :return: DataArray indexed by (frequency,) (size,) cores and
                 repetitions.
        """
        freq = []
        cpus = []
        size = []
        cores = []
        for f in sorted(self.config['thread_cpu'].keys(), key=int):
            freq.append(int(f))
            size = []
            mf = self.config['thread_cpu'][f]
            # Note: sizes are sorted lexicographically here (string keys),
            # unlike times() which sorts them numerically.
            for s in sorted(mf.keys()):
                size.append(s)
                cores = []
                mfs = mf[s]
                for c in sorted(mfs.keys(), key=int):
                    cores.append(int(c))
                    cpus.append(mfs[c])
                    # NOTE(review): assumes every (freq, size, cores) cell
                    # holds the same number of repetitions; `repetitions`
                    # keeps the count of the last cell visited - confirm.
                    repetitions = range(1,len(mfs[c])+1)
        cpus = np.array(cpus)
        # Reshape the flat list into an N-d array, dropping singleton
        # frequency/size axes so the coordinate set stays compact.
        if len(freq) == 1:
            cpus = cpus.reshape((len(size), len(cores), len(repetitions)))
            coords = [('size', size), ('cores', cores),
                      ('repetitions', repetitions)]
        else:
            if len(size) == 1:
                cpus = cpus.reshape((len(freq), len(cores), len(repetitions)))
                coords = [('frequency', freq), ('cores', cores),
                          ('repetitions', repetitions)]
            else:
                cpus = cpus.reshape((len(freq), len(size), len(cores),
                                     len(repetitions)))
                coords = [('frequency', freq), ('size', size),
                          ('cores', cores), ('repetitions', repetitions)]
        xcpus = xr.DataArray(cpus, coords=coords)
        return xcpus
    def times(self):
        """
        Return DataArray (xarray) with resume of all tests.

        DataArray format
            dims(frequency, size, cores)
            data=numpy array with median of measures times.

        Singleton frequency/size dimensions are squeezed out; how the
        squeezed dimension was fixed is recorded in the result's attrs.

        :return: DataArray with median of measures times.
        """
        freq = []
        times = []
        size = []
        cores = []
        config = deepcopy(self.config)
        # thread_cpu is bulky and irrelevant for timing; keep it out of the
        # DataArray attrs.
        config.pop('thread_cpu')
        for f in sorted(self.measures.keys()):
            freq.append(int(f))
            size = []
            mf = self.measures[f]
            for s in sorted(mf.keys(), key=int):
                size.append(int(s))
                cores = []
                mfs = mf[s]
                for c in sorted(mfs.keys(), key=int):
                    cores.append(int(c))
                    # The median over the repetitions is the retained value.
                    times.append(np.median(mfs[c]))
        times = np.array(times)
        if len(freq) == 1:
            times = times.reshape((len(size), len(cores)))
            coords = [('size', size), ('cores', cores)]
            # Frequency 0 is the marker for "not fixed" (dynamic scaling).
            if freq[0] == 0:
                config['frequency'] = 'dynamic'
            else:
                config['frequency'] = 'static: %s' % (freq[0])
        else:
            if len(size) == 1:
                times = times.reshape((len(freq), len(cores)))
                coords = [('frequency', freq), ('cores', cores)]
                config['size'] = 'static: %s' % (size[0])
            else:
                times = times.reshape((len(freq), len(size), len(cores)))
                coords = [('frequency', freq), ('size', size), ('cores', cores)]
        xtimes = xr.DataArray(times, coords=coords)
        xtimes.attrs = deepcopy(config)
        return xtimes
def speedups(self, serialfrequencyfixed = False):
"""
Return DataArray (xarray) with resume of all speedups.
DataArray format
dims(frequency, size, cores)
data=numpy array with calculated speedups.
:param serialfrequencyfixed: If Ts is calculated using a fixed frequency.
:return: DataArray with calculated speedups.
"""
times = self.times()
try:
times.sel(cores=1)
except:
print('Error: Time measurement for 1 core not found')
return None
lcores = len(times.coords['cores'])
lfreq = len(times.coords['frequency'])
ldims = []
for c in times.dims:
ldims.append(len(times.coords[c]))
if len(ldims) == 2:
if serialfrequencyfixed:
timesonecore = np.repeat(np.repeat(times.values[-1, 0], lfreq), lcores).reshape(tuple(ldims))
else:
timesonecore = np.repeat(times.values[:, 0], lcores).reshape(tuple(ldims))
xspeedup = (timesonecore / times)[:,1:]
elif len(ldims) == 3:
lsize = len(times.coords['size'])
if serialfrequencyfixed:
timesonecore = np.repeat(np.repeat(np.array([times.values[-1, :, 0]]),lfreq, axis=0).reshape((lfreq,lsize)), lcores).reshape(tuple(ldims))
else:
timesonecore = np.repeat(times.values[:, :, 0], lcores).reshape(tuple(ldims))
xspeedup = (timesonecore / times)[:,:,1:]
xspeedup.attrs = times.attrs
return xspeedup
def efficiency(self):
"""
Return DataArray (xarray) with resume of all efficiencies.
DataArray format
dims(frequency, size, cores)
data=numpy array with calculated efficiencies.
:return: DataArray with calculated efficiencies.
"""
speedups = self.speedups()
xefficency = speedups/speedups.coords['cores']
xefficency.attrs = speedups.attrs
return xefficency
    @staticmethod
    def plot2D(data, title='', greycolor=False, filename=''):
        """
        Plot the 2D (Speedup x Cores) lines graph.

        :param data: DataArray to plot, generate by speedups(),
                     times() or efficiency(). Must be 2-dimensional:
                     cores plus either size or frequency.
        :param title: Plot Title.
        :param greycolor: If set color of graph to grey colormap.
        :param filename: File name to save figure (eps format).
        :return:
        """
        if not data.size == 0:
            if len(data.dims) != 2:
                print('Error: Do not possible plot 3-dimensions data')
                return
            fig, ax = plt.subplots()
            xs = data.coords['cores'].values
            # One plotted line per value of the non-cores dimension.
            if 'size' in data.dims:
                datalines = data.coords['size'].values
                #xc_label = 'Input Size'
            elif 'frequency' in data.dims:
                datalines = data.coords['frequency'].values
                #xc_label = 'Frequency'
            if greycolor:
                # Reverse and trim the grey map so no line is too light to
                # see against the white background.
                colors = plt.cm.Greys(
                    np.linspace(0, 1, len(datalines) + 10))
                colors = colors[::-1]
                colors = colors[:-5]
            else:
                colors = plt.cm.jet(np.linspace(0, 1, len(datalines)))
            for i, d in enumerate(datalines):
                if 'size' in data.dims:
                    ys = data.sel(size=d)
                    legendtitle= 'Sizes'
                    legendlabel = d
                elif 'frequency' in data.dims:
                    ys = data.sel(frequency=d)
                    legendtitle= 'Frequencies'
                    # NOTE(review): coordinate values are multiplied by
                    # 1000 before formatting as Hz, implying they are
                    # stored in kHz - confirm the unit.
                    legendlabel = freq_hz(d*1000)
                line, = ax.plot(xs, ys, '-', linewidth=2, color=colors[i],
                                label='Speedup for %s' % legendlabel)
            ax.legend(loc='lower right', title=legendtitle)
            ax.set_xlabel('Number of Cores')
            ax.set_xlim(0, xs.max())
            ax.xaxis.set_major_locator(ticker.MultipleLocator(2.0))
            ax.set_ylabel('Speedup')
            ax.set_ylim(0, data.max().max()+1)
            ax.yaxis.set_major_locator(ticker.MultipleLocator(1.0))
            plt.title(title)
            if filename:
                plt.savefig(filename, dpi=1000)
            plt.show()
        else:
            print('Error: Do not possible plot data without '
                  'speedups information')
    @staticmethod
    def plot3D(data, slidername=None, title='Speedup Surface', zlabel='speedup',
               greycolor=False, filename=''):
        """
        Plot the 3D (Speedup x cores x input size) surface.

        For 3-dimensional data a radio-button widget selects which slice
        (along *slidername*) is displayed.

        :param data: DataArray to plot, generate by speedups(),
                     times() or efficiency().
        :param slidername: name of dimension of DataArray to use on slider
                           ('size' or 'frequency'); required for 3-d data.
        :param title: Plot Title.
        :param zlabel: Z Axis Label.
        :param greycolor: If set color of graph to grey colormap.
        :param filename: File name to save figure (eps format).
        :return:
        """

        def update_plot3D(idx):
            # Redraw the surface for the selected slice; idx is None when
            # the data is already 2-d (no slider).
            ax.clear()
            if idx is None:
                dataplot = data
                if 'size' in data.dims:
                    xc = data.coords['size'].values
                    xc_label = 'Input Size'
                elif 'frequency' in data.dims:
                    # NOTE(review): frequencies multiplied by 1000 before
                    # Hz formatting, implying kHz storage - confirm.
                    xc = [i*1000 for i in data.coords['frequency'].values]
                    xc_label = 'Frequency'
            else:
                if slidername == 'size':
                    dataplot = data.sel(size=int(idx))
                    xc = [i*1000 for i in dataplot.coords['frequency'].values]
                    xc_label = 'Frequency'
                elif slidername == 'frequency':
                    # Radio labels look like "1.2GHz"; strip the unit and
                    # convert back to the stored coordinate value.
                    idx = float(idx[:-3])*1e6
                    dataplot = data.sel(frequency=idx)
                    xc = dataplot.coords['size'].values
                    xc_label = 'Input Size'
            yc = dataplot.coords['cores'].values
            X, Y = np.meshgrid(yc, xc)
            Z = dataplot.values
            # Color scale is fixed from the full dataset so slices are
            # comparable, with a 10% margin on both ends.
            zmin = data.values.min()
            zmax = data.values.max()
            surfspeedup = ax.plot_surface(Y, X, Z, cmap=colormap,
                                          linewidth=0.5, edgecolor='k',
                                          linestyle='-',
                                          vmin=(zmin - (zmax - zmin) / 10),
                                          vmax=(zmax + (zmax - zmin) / 10))
            ax.tick_params(labelsize='small')
            ax.set_xlabel(xc_label)
            if xc_label == 'Frequency':
                ax.xaxis.set_major_formatter(ticker.EngFormatter(unit='Hz'))
            ax.set_ylabel('Number of Cores')
            ax.set_zlabel(zlabel)
            ax.set_zlim(0, 1.10 * zmax)
            fig.canvas.draw_idle()

        if not support3d:
            print('Warning: No 3D plot support. Please install matplotlib '
                  'with Axes3D toolkit')
            return
        fig = plt.figure()
        # NOTE(review): Figure.gca(projection='3d') was removed in
        # matplotlib >= 3.6; fig.add_subplot(projection='3d') is the
        # modern equivalent.
        ax = fig.gca(projection='3d')
        plt.title(title)
        if greycolor:
            colormap = cm.Greys
        else:
            colormap = cm.coolwarm
        if not data.size == 0:
            if len(data.dims) == 2:
                idx = None
            elif len(data.dims) == 3:
                if slidername in ('size','frequency'):
                    # Radio-button panel sized to the number of slices.
                    rax = plt.axes([0.01, 0.01, 0.17,
                                    len(data.coords[slidername].values)*0.04],
                                   facecolor='lightgoldenrodyellow')
                    if slidername == 'frequency':
                        raxtxt = ['{}GHz'.format(i) for i in
                                  data.coords[slidername].values/1e6]
                        idx = '{}GHz'.format(data.coords[slidername].values[0]/1e6)
                    else:
                        raxtxt = [str(i) for i in
                                  data.coords[slidername].values]
                        idx = str(data.coords[slidername].values[0])
                    radio = RadioButtons(rax, tuple(raxtxt))
                    for circle in radio.circles:
                        circle.set_radius(0.03)
                    radio.on_clicked(update_plot3D)
                else:
                    print('Error: Not is possible to plot data with wrong '
                          'axis names')
                    return
            else:
                print('Error: Not is possible to plot data with wrong '
                      'number of axis')
                return
            update_plot3D(idx)
            if filename:
                plt.savefig(filename, dpi=1000)
            plt.show()
        else:
            print('Error: Not is possible to plot data without '
                  'speedups information')
class ParsecLogsData(ParsecData):
"""
Class that store parsec run measures values obtained from
logs files
Atrributes
config: The metadata about execution informations
measures: Resume dictionary with all measures times
foldername: Folder where was found logs files
runfiles: List of processed files
benchmarks: List of benchmarks applications founder on log files
Methods:
loadata()
savedata()
fileproccess()
runlogfilesproc()
times()
speedups()
plot2D()
plot3D
"""
foldername = ''
runfiles = []
benchmarks = []
def __init__(self, foldername=None):
"""
Create a empty object or initialized of data from files found
in foldername
:param foldername: Folder name that store logs files
"""
ParsecData.__init__(self)
if foldername:
self.loaddata(foldername)
return
    def __str__(self):
        """
        Default output string representation of class

        :return: folder, processed file list, package, date and command,
                 one item per line.
        """
        if not self.config:
            return 'No data'
        folder = 'Folder: ' + self.foldername
        files = 'Processed Files: \n ' \
                + '\n '.join(self.runfiles)
        pkg = 'Package: ' + self.config['pkg']
        # Unlike ParsecData.__str__, execdate here is a datetime object,
        # hence the strftime formatting.
        dt = 'Date: ' + self.config['execdate'].strftime("%d-%m-%Y_%H:%M:%S")
        command = 'Command: ' + self.config['command']
        return folder + '\n' + files + '\n' + pkg+'\n' + dt + '\n' + command
    def loaddata(self, foldername):
        """
        Read all logs files that found in foldername and initialize
        the object class dictionaries.

        :param foldername: Folder name with logs files data.
        """
        if os.path.isdir(foldername):
            self.foldername = foldername
            # NOTE(review): self.runfiles is reassigned on every os.walk()
            # iteration, so only one directory's listing survives; this
            # presumably assumes a flat log folder (fileprocess() also
            # joins names against self.foldername, not the walk root) -
            # confirm logs never sit in sub-folders.
            for root, dirs, files in os.walk(foldername):
                self.runfiles = [name for name in files if
                                 name.startswith('run_')]
            if self.runfiles:
                self.runlogfilesprocess()
                self.config['pkg'] = ', '.join(self.benchmarks)
                self.config['execdate'] = datetime.now()
                self.config['command'] = 'logsprocess folder => ' \
                                         + self.foldername
        else:
            print('Error: Folder name not found.')
        return
    def savedata(self):
        """
        Write to file the measures information stored on object class.

        The target file is ``logs_<folder>_datafile_<date>.dat`` in the
        current directory.

        NOTE(review): overrides ParsecData.savedata, which takes an
        optional filename and returns it; this variant takes no argument
        and returns None - confirm callers do not rely on the parent
        signature.
        """
        filedatename = self.config['execdate'].strftime("%Y-%m-%d_%H:%M:%S")
        with open('logs_' + self.foldername + '_datafile_' + filedatename
                  + '.dat', 'w') as f:
            conftxt = self.config.copy()
            # Serialize the datetime before JSON encoding.
            conftxt['execdate'] =\
                conftxt['execdate'].strftime("%d-%m-%Y_%H:%M:%S")
            dictsave = {'config': conftxt, 'data': self.measures}
            json.dump(dictsave, f, ensure_ascii=False)
        return
def fileprocess(self, filename):
"""
Process a parsec log file and return a dictionary with processed data.
:param filename: File name to extract the contents data.
:return: dictionary with extracted values.
"""
f = open(filename)
content = f.read()
bn = os.path.basename(filename)
parts = bn.split("_")
cores = int(parts[1])
dictattrs = self.contentextract(content)
f.close()
dictattrs['filename'] = bn
dictattrs['cores'] = cores
return dictattrs
def runlogfilesprocess(self):
"""
Process parsec log files with a folder and load data on
object class attributes
:return:
"""
benchmarksset = set()
for filename in self.runfiles:
filepath = os.path.join(self.foldername, filename)
fattrs = self.fileprocess(filepath)
self.measurebuild(fattrs)
benchmarksset.add(fattrs['benchmark'])
self.benchmarks = list(benchmarksset)
return
| mit |
Transkribus/TranskribusDU | TranskribusDU/gcn/DU_gcn_task.py | 1 | 37565 | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import pickle
import os.path
import random
import gcn.gcn_models as gcn_models
from gcn.gcn_datasets import GCNDataset
import time
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train', '', "FilePath Train pickle file")
tf.app.flags.DEFINE_string('test', '', "FilePath for the pickle")
tf.app.flags.DEFINE_integer('fold', '1', "FilePath for the pickle")
tf.app.flags.DEFINE_string('out_dir', 'out_res', "outdirectory for saving the results")
tf.app.flags.DEFINE_integer('configid', 0, 'gridid')
tf.app.flags.DEFINE_bool('snake',False, 'whether to work on the snake dataset')
tf.app.flags.DEFINE_bool('das_train',False, ' Training the Model for the DAS paper')
tf.app.flags.DEFINE_bool('das_predict',False, 'Prediction Experiment for the DAS paper')
tf.app.flags.DEFINE_bool('das_predict_workflow',False, 'Prediction Experiment for the DAS paper')
# Details of the training configuration.
tf.app.flags.DEFINE_float('learning_rate', 0.1, """How large a learning rate to use when training, default 0.1 .""")
tf.app.flags.DEFINE_integer('nb_iter', 3000, """How many training steps to run before ending, default 1.""")
tf.app.flags.DEFINE_integer('nb_layer', 1, """How many layers """)
tf.app.flags.DEFINE_integer('eval_iter', 256, """How often to evaluate the training results.""")
tf.app.flags.DEFINE_string('path_report', 'default', """Path for saving the results """)
tf.app.flags.DEFINE_string('grid_configs', '3_4_5', """Configs to be runned on all the folds """)
tf.app.flags.DEFINE_integer('qsub_taskid', -1, 'qsub_taskid')
#For Snake python DU_gcn_task.py --snake=True --configid=22
import errno
import os
def mkdir_p(path):
    """Create directory *path* like ``mkdir -p``.

    Succeeds silently when the directory (and its parents) already exist;
    any other failure - the path exists as a regular file, permission
    denied, read-only filesystem - propagates as OSError.

    :param path: directory path to create, parents included.
    """
    # exist_ok=True reproduces the old errno.EEXIST/isdir dance in one
    # call: an existing directory is fine, an existing non-directory
    # still raises FileExistsError (an OSError subclass).
    os.makedirs(path, exist_ok=True)
def _make_grid_qsub(grid_qsub=0):
if grid_qsub==0:
tid=0
C={}
for fold_id in [1,2,3,4]:
#for config in [4,5]:
#for config in [27,28,29]:
for config in [31]:
#for config in [3, 4]:
#for config in [5]:
C[tid]=(fold_id,config)
tid+=1
return C
else:
raise NotImplementedError
def get_config(config_id=0):
config = {}
if config_id == 0:
config['nb_iter'] = 1000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 1
elif config_id==-1:
#Debug Configuration with few iterations
config['nb_iter'] = 10
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1
config['nconv_edge'] = 10
elif config_id==1:
#config['nb_iter'] = 2000
config['nb_iter'] = 1000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 1
config['fast_convolve'] = True
#config['train_Wn0']=False
elif config_id==2:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 10
config['fast_convolve'] = True
elif config_id==3:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 50
config['fast_convolve'] = True
elif config_id==4:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1
config['nconv_edge'] = 7
config['fast_convolve'] = True
elif config_id==5:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
#config['train_Wn0']=False
#Projection
elif config_id == 6:
# config['nb_iter'] = 2000
config['nb_iter'] = 1500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.1
config['num_layers'] = 2
config['node_indim'] = 20 # INDIM =2 not working here
config['nconv_edge'] = 10
#Config for snakes ..
elif config_id == 7:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.1
config['num_layers'] = 1
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 121
#config['activation']=tf.tanh
elif config_id == 8:
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =10
#config['snake']=True
#config['activation'] = tf.tanh
#Feature in snake. No way just to consider on neighbor , in table this is possible due to the type of feature , which are group
###########################################
elif config_id == 9:
config['nb_iter'] = 2000
config['lr'] = 0.0005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =4
config['snake'] = True
# Testing Regularization Effect ...
# Back to Config 5 but with regularization
elif config_id==10:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id==11:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.01
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id==12:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
#Config Deep
elif config_id == 13:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 5
#Test Residual Connection
elif config_id == 14:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['residual_connection']=True
elif config_id == 15:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 50
config['shared_We']=True
elif config_id == 16:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['opti']=tf.train.AdagradOptimizer(config['lr'])
elif config_id == 17:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['opti']=tf.train.RMSPropOptimizer(config['lr'])
#Dropout Mode Test
elif config_id==18:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.2 #means we keep with a proba of 0.8
config['dropout_mode'] = 2
#Dropout Edges..
elif config_id==19:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.2 #means we keep with a proba of 0.8
config['dropout_mode'] = 4
elif config_id==20:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id==21:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.0005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id == 22:
config['nb_iter'] = 200
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
#config['num_layers'] = 2 #Mean Node Accuracy 0.92
#config['num_layers'] = 5 #Mean Node Accuracy 0.9381
config['num_layers'] = 9 # --> 9523 converges quickly
config['node_indim'] = -1 # INDIM =2 not working here #Should add bias to convolutions, no ?
config['nconv_edge'] =4 #Already by default
config['snake']=True
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id == 23:
config['nb_iter'] = 200
config['lr'] = 0.0001
config['stack_instead_add'] = True
config['mu'] = 0.0
#config['num_layers'] = 2 #Mean Node Accuracy 0.92
#config['num_layers'] = 3 #Mean Node Accuracy 0.9381
config['num_layers'] = 6 # --> 9523 converges quickly
config['node_indim'] = -1 # INDIM =2 not working here #Should add bias to convolutions, no ?
config['nconv_edge'] =4 #Already by default
config['snake']=True
config['dropout_rate'] = 0.1
config['dropout_mode'] = 2
elif config_id == 24:
config['nb_iter'] = 800
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 9
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =5
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
#config['shared_We'] = True
elif config_id == 25:
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 20
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =2
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
#config['shared_We'] = True
elif config_id == 26: #Config for the Snake with the same feature rep as CRF ie the fixed_node one
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = False #Default True
config['mu'] = 0.0
config['num_layers'] = 7
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =10
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id==27:
#This is config 5 but with stakcing
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id == 28:
# This is config 5 but with stakcing
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 8
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id == 29:
# This is config 5 but with stakcing
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 20
elif config_id == 30:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 8
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id == 31:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.001
config['num_layers'] = 8
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id == 32:
# Same as 31 but with dropout
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.000
config['num_layers'] = 8
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
config['dropout_rate'] = 0.2
config['dropout_mode'] = 2
# config['shared_We'] = True
elif config_id == 33:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.001
config['num_layers'] = 8
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 1
config['fast_convolve']=True
elif config_id == 34:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 1
config['fast_convolve'] = True
elif config_id==35:
#This is 5 with small regularization as 31
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id == 36:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.001
config['num_layers'] = 10
config['node_indim'] = 10 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id == 37:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.000
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 1
config['fast_convolve'] = True
elif config_id == 38:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.000
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 3
config['fast_convolve'] = True
elif config_id == 39:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.000
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 4
config['fast_convolve'] = True
elif config_id == 40:
# Same as 28 but with fast convolve
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.000
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve'] = True
config['logit_convolve'] = True
elif config_id==41:
#Same as 5 but with sum stacking
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id==42:
#Baseline GCN model
config['model']='baseline'
config['nb_iter'] = 2000
config['lr'] = 0.001
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
elif config_id==43:
config['model']='baseline'
config['nb_iter'] = 2000
config['lr'] = 0.001
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1
config['dropout_p'] = 0.0
config['dropout_mode'] = 0
elif config_id==44:
config['model']='baseline'
config['nb_iter'] = 2000
config['lr'] = 0.001
config['mu'] = 0.0
config['num_layers'] = 7
config['node_indim'] = -1
config['dropout_p'] = 0.0
config['dropout_mode'] = 0
elif config_id==45:
config['model']='baseline'
config['nb_iter'] = 2000
config['lr'] = 0.001
config['mu'] = 0.0
config['num_layers'] = 12
config['node_indim'] = -1
config['dropout_p'] = 0.0
config['dropout_mode'] = 0
elif config_id==46:
#same as 5 but with less iterations
#in order to measure predictions time
config['nb_iter'] = 200
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
elif config_id==47:
config['nb_iter'] = 1000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve'] = True
config['dropout_p'] = 0.3
config['dropout_mode'] = 2
elif config_id==48:
#same as 5 but with less iterations
#in order to measure predictions time
config['nb_iter'] = 500
config['lr'] = 0.00001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
#config['dropout_rate_edge']=0.2
#config['dropout_rate_edge_feat'] = 0.0
#config['dropout_rate_node'] = 0.2
else:
raise NotImplementedError
return config
def run_model_train_val_test(gcn_graph,
                             config_params,
                             outpicklefname,
                             ratio_train_val=0.1,
                             gcn_graph_test=None,
                             save_model_path=None
                             ):
    """Train a GCN model on ``gcn_graph`` with a random train/validation
    split, then pickle the training/validation (and optional test) scores.

    :param gcn_graph: list of GCNDataset graphs used for training.
        NOTE: the list is shuffled *in place* below.
    :param config_params: configuration dict as returned by get_config()
    :param outpicklefname: path of the pickle file receiving the score dict
    :param ratio_train_val: fraction of ``gcn_graph`` held out for validation
    :param gcn_graph_test: optional list of test graphs evaluated during training
    :param save_model_path: optional checkpoint path prefix for saving the model
    """
    # Build everything in a fresh graph so repeated calls do not pollute
    # the default TensorFlow graph.
    g_1 = tf.Graph()
    with g_1.as_default():
        node_dim = gcn_graph[0].X.shape[1]
        # The first two columns of E are the (source, target) node ids,
        # hence excluded from the edge feature dimension.
        # NOTE(review): the literal 2.0 makes edge_dim a float — presumably
        # harmless downstream, but confirm against EdgeConvNet.
        edge_dim = gcn_graph[0].E.shape[1] - 2.0
        nb_class = gcn_graph[0].Y.shape[1]

        # Select the model family: plain GCN baseline vs edge-convolution model.
        if 'model' in config_params and config_params['model']=='baseline':
            gcn_model = gcn_models.GraphConvNet(node_dim,nb_class,
                                                num_layers=config_params['num_layers'],
                                                learning_rate=config_params['lr'],
                                                mu=config_params['mu'],
                                                node_indim=config_params['node_indim'],
                                                )
        else:
            gcn_model = gcn_models.EdgeConvNet(node_dim, edge_dim, nb_class,
                                               num_layers=config_params['num_layers'],
                                               learning_rate=config_params['lr'],
                                               mu=config_params['mu'],
                                               node_indim=config_params['node_indim'],
                                               nconv_edge=config_params['nconv_edge'],
                                               residual_connection=config_params['residual_connection'] if 'residual_connection' in config_params else False
                                               )
            gcn_model.stack_instead_add = config_params['stack_instead_add']

        # Optional knobs: only applied when present in the configuration.
        if 'fast_convolve' in config_params:
            gcn_model.fast_convolve = config_params['fast_convolve']
        if 'logit_convolve' in config_params:
            gcn_model.logit_convolve=config_params['logit_convolve']
        if 'train_Wn0' in config_params:
            gcn_model.train_Wn0= config_params['train_Wn0']
        if 'dropout_rate_edge' in config_params:
            gcn_model.dropout_rate_edge=config_params['dropout_rate_edge']
            print('Dropout Edge', gcn_model.dropout_rate_edge)
        if 'dropout_rate_edge_feat' in config_params:
            gcn_model.dropout_rate_edge_feat=config_params['dropout_rate_edge_feat']
            print('Dropout Edge', gcn_model.dropout_rate_edge_feat)
        if 'dropout_rate_node' in config_params:
            gcn_model.dropout_rate_node=config_params['dropout_rate_node']
            print('Dropout Node', gcn_model.dropout_rate_node)

        gcn_model.create_model()

        #Split Training to get some validation
        #ratio_train_val=0.2
        # In-place shuffle, then the first split_idx graphs become validation.
        split_idx= int(ratio_train_val*len(gcn_graph))
        random.shuffle(gcn_graph)
        gcn_graph_train=[]
        gcn_graph_val=[]
        gcn_graph_val.extend(gcn_graph[:split_idx])
        gcn_graph_train.extend(gcn_graph[split_idx:])

        with tf.Session() as session:
            session.run([gcn_model.init])
            # R holds the training/validation (and optional test) curves.
            R=gcn_model.train_with_validation_set(session,gcn_graph_train,gcn_graph_val,config_params['nb_iter'],eval_iter=10,patience=1000,graph_test=gcn_graph_test,save_model_path=save_model_path)
            #R=gcn_model.train_All_lG(session,gcn_graph_train,gcn_graph_val,config_params['nb_iter'],eval_iter=10,patience=1000,graph_test=gcn_graph_test,save_model_path=save_model_path)
            f=open(outpicklefname,'wb')
            pickle.dump(R,f)
            f.close()

            # Dead evaluation code kept for reference (confusion matrix dump).
            #Ypred = gcn_model.predict_lG(session,gcn_graph_test)
            '''
            Y_true_flat=[]
            Ypred_flat=[]
            for graph,ypred in zip(gcn_graph_test,Ypred):
                ytrue = np.argmax(graph.Y,axis=1)
                Y_true_flat.extend(ytrue)
                Ypred_flat.extend(ypred)
            cm=sklearn.metrics.confusion_matrix(Y_true_flat,Ypred_flat)
            print(cm)
            out_conf_mat = outpicklefname+'.conf_mat.pkl'
            g=open(out_conf_mat,'wb')
            pickle.dump([Y_true_flat,Ypred_flat,cm],g)
            g.close()
            out_conf_mat_txt=outpicklefname+'.conf_mat.txt'
            f=open(out_conf_mat_txt,'w')
            f.write('Confusion Matrix \n')
            f.write(str(cm)+'\n')
            f.write(sklearn.metrics.classification_report(Y_true_flat,Ypred_flat))
            f.close()
            '''
def main_fold(foldid, configid, outdir):
    '''
    Simple Fold experiment: load one fold's train/test pickles, train the
    given model configuration and evaluate it on the test graphs.

    :param foldid: integer id of the CV fold to load
    :param configid: id of the model configuration (see get_config)
    :param outdir: directory receiving the result pickle
    :return: None; scores are pickled to <outdir>/table_F<foldid>_C<configid>.pickle
    '''
    pickle_train = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(
        foldid) + '_tlXlY_trn.pkl'
    pickle_test = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(foldid) + '_tlXlY_tst.pkl'

    train_graph = GCNDataset.load_transkribus_pickle(pickle_train)
    test_graph = GCNDataset.load_transkribus_pickle(pickle_test)

    config = get_config(configid)
    #acc_test = run_model(train_graph, config, test_graph)
    #print('Accuracy Test', acc_test)

    # BUGFIX: build the output name from the function arguments, not from the
    # global FLAGS — with FLAGS.fold == -1 (grid mode) every fold previously
    # overwrote the same 'table_F-1_C...' file in FLAGS.out_dir.
    outpicklefname = os.path.join(outdir, 'table_F' + str(foldid) + '_C' + str(configid) + '.pickle')
    # BUGFIX: the signature is (gcn_graph, config_params, outpicklefname, ...);
    # previously test_graph was passed positionally as outpicklefname and the
    # output name as ratio_train_val. The test graphs go in gcn_graph_test.
    run_model_train_val_test(train_graph,
                             config,
                             outpicklefname,
                             gcn_graph_test=test_graph)
def main(_):
    """Command-line entry point: dispatch on FLAGS to one of the experiment
    modes (snake toy data, full DAS training, DAS prediction, qsub grid task,
    or single/all-fold table experiments).
    """
    if FLAGS.snake is True:
        # Toy "snake" dataset: train on one pickle, test on another.
        pickle_train = '/home/meunier/Snake/snake_tlXlY_edge_trn.pkl'
        pickle_test = '/home/meunier/Snake/snake_tlXlY_edge_tst.pkl'

        #pickle_train = '/home/meunier/Snake/snake_tlXlY_trn.pkl'
        #pickle_test = '/home/meunier/Snake/snake_tlXlY_tst.pkl'

        #pickle_train = '/home/meunier/Snake/snake_tlXlY_fixed_trn.pkl'
        #pickle_test = '/home/meunier/Snake/snake_tlXlY_fixed_tst.pkl'

        #pickle_train='/home/meunier/Snake/snake_tlXlY_2_fixed_trn.pkl'
        #pickle_test='/home/meunier/Snake/snake_tlXlY_2_fixed_tst.pkl'

        train_graph = GCNDataset.load_snake_pickle(pickle_train)
        test_graph = GCNDataset.load_snake_pickle(pickle_test)

        config = get_config(FLAGS.configid)
        acc_test = run_model(train_graph, config, test_graph)
        print('Accuracy Test', acc_test)

    elif FLAGS.das_train is True:
        #Load all the files of table
        # Train the model
        graph_train=[]
        debug=True
        if debug:
            pickle_train='/nfs/project/read/testJL/TABLE/das_abp_models/abp_full_tlXlY_trn.pkl'
            pickle_train_ra ='/nfs/project/read/testJL/TABLE/abp_DAS_CRF_Xr.pkl'
            print(pickle_train_ra,pickle_train)
            #train_graph = GCNDataset.load_transkribus_pickle(pickle_train)
            graph_train =GCNDataset.load_transkribus_reverse_arcs_pickle(pickle_train,pickle_train_ra,format_reverse='lx')
        else:
            # Non-debug path: merge train and test graphs of one fold.
            i=1
            pickle_train = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(i) + '_tlXlY_trn.pkl'
            pickle_test = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(i) + '_tlXlY_tst.pkl'
            # reversed edged
            pickle_train_ra = '/nfs/project/read/testJL/TABLE/das_abp_models/abp_CV_fold_' + str(i) + '_tlXrlY_trn.pkl'
            pickle_test_ra = '/nfs/project/read/testJL/TABLE/das_abp_models/abp_CV_fold_' + str(i) + '_tlXrlY_tst.pkl'

            train_graph = GCNDataset.load_transkribus_reverse_arcs_pickle(pickle_train, pickle_train_ra)
            test_graph = GCNDataset.load_transkribus_reverse_arcs_pickle(pickle_test, pickle_test_ra)
            graph_train.extend(train_graph)
            graph_train.extend(test_graph)

        print('Graph Train Nb',len(graph_train))
        #Load the other dataset for predictions
        configid = FLAGS.configid
        config = get_config(configid)
        #config['nb_iter'] = 100
        dirp =os.path.join('models_all','C'+str(configid))
        mkdir_p(dirp)
        save_model_dir=os.path.join(dirp,'alldas_exp1_C'+str(configid)+'.ckpt')
        #I should save the pickle
        outpicklefname=os.path.join(dirp,'alldas_exp1_C'+str(configid)+'.validation_scores.pickle')
        run_model_train_val_test(graph_train, config, outpicklefname, ratio_train_val=0.1,save_model_path=save_model_dir)
        #for test add gcn_graph_test=train_graph

    elif FLAGS.das_predict is True:
        do_test=False #some internal flags to do some testing
        # Fixed dimensions of the trained DAS models.
        node_dim = 29
        edge_dim = 140
        nb_class = 5

        configid = FLAGS.configid
        config = get_config(configid)

        #Get the best file
        #TODO Get the best file
        #node_dim = gcn_graph[0].X.shape[1]
        #edge_dim = gcn_graph[0].E.shape[1] - 2.0
        #nb_class = gcn_graph[0].Y.shape[1]
        #f = open('archive_models/das_exp1_C31.validation_scores.pickle', 'rb')

        # Pick the checkpoint with the best validation accuracy.
        val_pickle = os.path.join('models_all', 'C' + str(configid),
                                  "alldas_exp1_C" + str(configid) + '.validation_scores.pickle')
        print('Reading Training Info from:', val_pickle)
        f = open(val_pickle, 'rb')
        R = pickle.load(f)
        val = R['val_acc']
        print('Validation scores',val)
        epoch_index = np.argmax(val)
        print('Best performance on val set: Epoch',epoch_index)

        gcn_model = gcn_models.EdgeConvNet(node_dim, edge_dim, nb_class,
                                           num_layers=config['num_layers'],
                                           learning_rate=config['lr'],
                                           mu=config['mu'],
                                           node_indim=config['node_indim'],
                                           nconv_edge=config['nconv_edge'],
                                           )
        gcn_model.stack_instead_add = config['stack_instead_add']

        if 'fast_convolve' in config:
            gcn_model.fast_convolve = config['fast_convolve']

        gcn_model.create_model()

        if do_test:
            graph_train = []
            for i in range(1, 5):
                pickle_train = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(i) + '_tlXlY_trn.pkl'
                print('loading ', pickle_train)
                train_graph = GCNDataset.load_transkribus_pickle(pickle_train)
                graph_train.extend(train_graph)

        #TODO load the data for test
        #/nfs/project/read/testJL/TABLE/abp_DAS_col9142_CRF_X.pkl
        if FLAGS.das_predict_workflow :
            pickle_predict='/nfs/project/read/testJL/TABLE/abp_DAS_col9142_workflow_X.pkl'
            pickle_predict_ra = '/nfs/project/read/testJL/TABLE/abp_DAS_col9142_workflow_Xr.pkl'
        else:
            pickle_predict='/nfs/project/read/testJL/TABLE/abp_DAS_col9142_CRF_X.pkl'
            pickle_predict_ra = '/nfs/project/read/testJL/TABLE/abp_DAS_col9142_CRF_Xr.pkl'

        print('loading ', pickle_predict,pickle_predict_ra)
        predict_graph = GCNDataset.load_test_pickle(pickle_predict,nb_class,pickle_reverse_arc=pickle_predict_ra)

        with tf.Session() as session:
            # Restore variables from disk.
            session.run(gcn_model.init)
            if do_test:
                gcn_model.restore_model(session, "models/das_exp1_C31.ckpt-99")
                print('Loaded models')
                graphAcc,node_acc=gcn_model.test_lG(session,graph_train)
                print(graphAcc,node_acc)

            # Checkpoints are saved every 10 epochs, hence 10 * epoch_index.
            model_path =os.path.join('models_all','C'+str(configid),"alldas_exp1_C"+str(configid)+".ckpt-"+str(10*epoch_index))
            print('Model_path',model_path)
            gcn_model.restore_model(session, model_path)
            print('Loaded models')

            start_time = time.time()
            lY_pred = gcn_model.predict_lG(session, predict_graph, verbose=False)
            end_time = time.time()
            print("--- %s seconds ---" % (end_time - start_time))
            print('Number of graphs:',len(lY_pred))

            #Convert to list as Python pickle does not seem like the array while the list can be pickled
            lY_list=[]
            for x in lY_pred:
                lY_list.append(list(x))

            #print(lY_list)
            if FLAGS.das_predict_workflow:
                outpicklefname = 'allmodel_das_predict_C'+str(configid)+'_workflow.pickle'
            else:
                outpicklefname = 'allmodel_das_predict_C'+str(configid)+'.pickle'

            g=open(outpicklefname,'wb')
            # BUGFIX: dump the converted lY_list — it was built precisely
            # because the raw arrays do not pickle well, but the original
            # code dumped lY_pred and left lY_list unused.
            pickle.dump(lY_list, g, protocol=2,fix_imports=True)
            g.close()

    elif FLAGS.qsub_taskid >-1:
        # Running as one task of a qsub array job: map the task id to a
        # (fold, config) pair of the grid.
        GRID = _make_grid_qsub(0)

        try:
            fold_id,configid =GRID[FLAGS.qsub_taskid]
        # BUGFIX: was a bare `except:` which also swallowed unrelated errors
        # (including KeyboardInterrupt); only an invalid task id is expected.
        except (IndexError, KeyError):
            print('Invalid Grid Parameters',FLAGS.qsub_taskid,GRID)
            return -1

        print('Experiement with FOLD',fold_id,' CONFIG',configid)
        pickle_train = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(
            fold_id) + '_tlXlY_trn.pkl'
        pickle_test = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(
            fold_id) + '_tlXlY_tst.pkl'

        train_graph = GCNDataset.load_transkribus_pickle(pickle_train)
        test_graph = GCNDataset.load_transkribus_pickle(pickle_test)

        config = get_config(configid)

        if os.path.exists(FLAGS.out_dir) is False:
            print('Creating Dir',FLAGS.out_dir)
            os.mkdir(FLAGS.out_dir)

        outpicklefname = os.path.join(FLAGS.out_dir, 'table_F' + str(fold_id) + '_C' + str(configid) + '.pickle')
        run_model_train_val_test(train_graph, config, outpicklefname,ratio_train_val=0.1,gcn_graph_test= test_graph)

    else:
        if FLAGS.fold==-1:
            #Do it on all the fold for the specified configs
            FOLD_IDS=[1,2,3,4]
            sel_configs_ = FLAGS.grid_configs.split('_')
            sel_configs = [int(x) for x in sel_configs_]
            print('GRID on FOLDS',FOLD_IDS)
            print('Model Configs', sel_configs)

            for cid in sel_configs:
                for fid in FOLD_IDS:
                    print('Running Fold',fid,'on Config',cid)
                    main_fold(fid,cid,FLAGS.out_dir)
        else:
            # Single-fold experiment with reverse-arc features.
            pickle_train = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(
                FLAGS.fold) + '_tlXlY_trn.pkl'
            pickle_test = '/nfs/project/read/testJL/TABLE/abp_quantile_models/abp_CV_fold_' + str(FLAGS.fold) + '_tlXlY_tst.pkl'

            #reversed edged
            pickle_train_ra = '/nfs/project/read/testJL/TABLE/das_abp_models/abp_CV_fold_' + str(
                FLAGS.fold) + '_tlXrlY_trn.pkl'
            pickle_test_ra = '/nfs/project/read/testJL/TABLE/das_abp_models/abp_CV_fold_' + str(FLAGS.fold) + '_tlXrlY_tst.pkl'

            #train_graph = GCNDataset.load_transkribus_pickle(pickle_train)
            train_graph = GCNDataset.load_transkribus_reverse_arcs_pickle(pickle_train,pickle_train_ra)
            print('Loaded Trained Graphs:',len(train_graph))
            test_graph = GCNDataset.load_transkribus_reverse_arcs_pickle(pickle_test,pickle_test_ra)
            #test_graph = GCNDataset.load_transkribus_pickle(pickle_test, pickle_test_ra)
            print('Loaded Test Graphs:', len(test_graph))

            config = get_config(FLAGS.configid)
            #acc_test = run_model(train_graph, config, test_graph,eval_iter=1)
            #print('Accuracy Test', acc_test)

            outpicklefname=os.path.join(FLAGS.out_dir,'table_F'+str(FLAGS.fold)+'_C'+str(FLAGS.configid)+'.pickle')
            run_model_train_val_test(train_graph,config,outpicklefname,gcn_graph_test= test_graph)
if __name__ == '__main__':
    # tf.app.run() parses the command-line FLAGS and then calls main(_).
    tf.app.run()
| bsd-3-clause |
tsilifis/chaos_basispy | demos/demo0/demo_isometry.py | 1 | 1032 | """
Visualize isometry constructed
using different orthogonalization methods
> d = 15
> random coefficients qe ~ N(0,1)
"""
import numpy as np
import matplotlib.pyplot as plt
import chaos_basispy as cb
import matplotlib
matplotlib.rcParams.update({'font.size': 7})
def visualize_isometry(d):
    """Plot side by side the isometries produced by the three
    orthogonalization methods of chaos_basispy for one random
    coefficient vector qe ~ N(0, 1) of dimension d.

    :param d: dimensionality of the coefficient vector
    Side effect: saves the figure to 'isometry.eps'.
    """
    # BUGFIX: removed the stray no-op expression statement `d` that the
    # original body started with.
    qe = np.random.normal(size=(d,))
    # One isometry per orthogonalization method id (0, 1, 2).
    isometries = [cb.BasisAdapt().gauss_adaptation(qe, d, method=m) for m in range(3)]
    fig = plt.figure()
    for m, A in enumerate(isometries):
        ax = fig.add_subplot(1, 3, m + 1)
        ax.imshow(A, interpolation='none')
        plt.title(r'$method=%d$' % m, fontsize=9)
    plt.savefig('isometry.eps', dpi=1200)
if __name__ == '__main__':
    # Render one example with 15 dimensions and show it interactively.
    visualize_isometry(d = 15)
    plt.show()
| gpl-3.0 |
Obus/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)

import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances

# Part 1: plot the theoretical dependency between n_components_min and
# n_samples

# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))

# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)

# One curve per distortion eps: minimal n_components vs n_samples.
plt.figure()
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)

plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")

# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)

# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))

# One curve per n_samples: minimal n_components vs distortion eps.
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)

plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse

if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]

n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))

n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()

# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]

for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    # Report the memory footprint of the sparse projection matrix.
    if hasattr(rp, 'components_'):
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))

    # Pairwise distances in the projected space, restricted to the same
    # non-identical pairs selected above.
    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]

    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')

    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))

    # NOTE(review): `normed=True` was removed in matplotlib >= 3.1; on
    # modern matplotlib this call needs `density=True` instead — confirm
    # the targeted matplotlib version before changing.
    plt.figure()
    plt.hist(rates, bins=50, normed=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)

# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region

plt.show()
| bsd-3-clause |
johnveitch/cpnest | examples/diagnose_trajectory.py | 1 | 1089 | import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import sys, os
np.seterr(all='raise')
def log_likelihood(x):
    """Log-likelihood of `x` under an isotropic standard normal.

    Each entry contributes -x_i**2 / 2 - log(2*pi) / 2; the contributions
    are summed over all entries. Vectorised replacement for the original
    per-element Python list comprehension (identical result, O(1) Python
    overhead instead of one list element per component).

    :param x: numpy array of coordinates (any shape with at least 1 dim)
    :return: float, the summed log-density
    """
    return -0.5 * np.sum(x ** 2) - 0.5 * x.size * np.log(2.0 * np.pi)
# First CLI argument selects the mode: 'delete' removes the trajectory
# dumps, anything else is treated as a trajectory id to plot.
mode = sys.argv[1]

if mode == 'delete':
    # Remove all trajectory_* dump files (but not .py scripts) and exit.
    allfiles = os.listdir('.')
    toremove = [a for a in allfiles if 'trajectory_' in a and '.py' not in a]
    for f in toremove: os.remove(f)
    exit()

# Load the named trajectory; columns are accessed by name below
# ('0', '1', 'logL', 'logLmin').
traj = np.genfromtxt('trajectory_'+sys.argv[1]+'.txt', names= True)

# Evaluate the 2-D standard-normal log-likelihood on a regular grid.
npts = 256
x = np.linspace(-10,10,npts)
y = np.linspace(-10,10,npts)
X, Y = np.meshgrid(x, y)
Z = np.zeros((npts,npts))
for i in range(npts):
    for j in range(npts):
        Z[i,j] = log_likelihood(np.array([x[i],y[j]]))

# Contour at the trajectory's initial logLmin, filled background, and the
# trajectory itself with per-step indices annotated.
C = plt.contour(X, Y, Z, levels = [traj['logLmin'][0]], linewidths=1.0,colors='k')
plt.contourf(X, Y, Z, 6, cmap = cm.Greys_r)
S = plt.scatter(traj['0'], traj['1'], c=traj['logL'], s = 8)
plt.plot(traj['0'],traj['1'], color = 'k', lw = 0.5)
for k in range(traj.shape[0]):
    plt.text(traj['0'][k],traj['1'][k], str(k), color="black", fontsize=8)
plt.colorbar(S)
plt.show()
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/finance_work2.py | 3 | 6269 | import datetime
import numpy as np
import matplotlib.colors as colors
import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
# Quote download range and ticker symbol.
startdate = datetime.date(2006,1,1)
today = enddate = datetime.date.today()
ticker = 'SPY'

fh = finance.fetch_historical_yahoo(ticker, startdate, enddate)
# a numpy record array with fields: date, open, high, low, close, volume, adj_close
r = mlab.csv2rec(fh); fh.close()
# sort in place so the records are in chronological order
r.sort()
def moving_average(x, n, type='simple'):
    """Return the n period moving average of x as an ndarray.

    type is 'simple' | 'exponential'
    """
    values = np.asarray(x)
    # Averaging kernel: flat for a simple MA, exponentially increasing
    # (most recent sample weighted highest) for the exponential MA.
    kernel = np.ones(n) if type == 'simple' else np.exp(np.linspace(-1., 0., n))
    kernel = kernel / kernel.sum()
    avg = np.convolve(values, kernel, mode='full')[:len(values)]
    # The first n outputs come from incomplete windows; replace them with
    # the first fully-windowed value so there is no edge artefact.
    avg[:n] = avg[n]
    return avg
def relative_strength(prices, n=14):
    """Return the n period relative strength indicator (RSI) of `prices`.

    http://stockcharts.com/school/doku.php?id=chart_school:glossary_r#relativestrengthindex
    http://www.investopedia.com/terms/r/rsi.asp
    """
    changes = np.diff(prices)
    # Seed the running gain/loss averages from the first n+1 price changes
    # (note: divided by n, not by the seed length, as in the original).
    warmup = changes[:n + 1]
    avg_gain = warmup[warmup >= 0].sum() / n
    avg_loss = -warmup[warmup < 0].sum() / n

    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100. / (1. + avg_gain / avg_loss)

    # Wilder-style smoothing of gains and losses for the remaining samples.
    for i in range(n, len(prices)):
        change = changes[i - 1]  # diff is one element shorter than prices
        gain = change if change > 0 else 0.
        loss = -change if change <= 0 else 0.
        avg_gain = (avg_gain * (n - 1) + gain) / n
        avg_loss = (avg_loss * (n - 1) + loss) / n
        rsi[i] = 100. - 100. / (1. + avg_gain / avg_loss)

    return rsi
def moving_average_convergence(x, nslow=26, nfast=12):
    """Compute the MACD (Moving Average Convergence/Divergence) from a fast
    and a slow exponential moving average.

    Returns (emaslow, emafast, macd), each an array of len(x).
    """
    ema_slow = moving_average(x, nslow, type='exponential')
    ema_fast = moving_average(x, nfast, type='exponential')
    # MACD is the fast EMA minus the slow EMA.
    return ema_slow, ema_fast, ema_fast - ema_slow
# Global style: light grid on all axes.
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)

# Three stacked panels sharing the x axis: RSI (top), price+volume
# (middle), MACD (bottom).
textsize = 9
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]

fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6'  # the axes background color

ax1 = fig.add_axes(rect1, axisbg=axescolor)  #left, bottom, width, height
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax2t = ax2.twinx()
ax3 = fig.add_axes(rect3, axisbg=axescolor, sharex=ax1)

### plot the relative strength indicator
prices = r.adj_close
rsi = relative_strength(prices)

fillcolor = 'darkgoldenrod'

# Shade the overbought (>70) and oversold (<30) regions.
ax1.plot(r.date, rsi, color=fillcolor)
ax1.axhline(70, color=fillcolor)
ax1.axhline(30, color=fillcolor)
ax1.fill_between(r.date, rsi, 70, where=(rsi>=70), facecolor=fillcolor, edgecolor=fillcolor)
ax1.fill_between(r.date, rsi, 30, where=(rsi<=30), facecolor=fillcolor, edgecolor=fillcolor)
ax1.text(0.6, 0.9, '>70 = overbought', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.text(0.6, 0.1, '<30 = oversold', transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30,70])
ax1.text(0.025, 0.95, 'RSI (14)', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.set_title('%s daily'%ticker)

### plot the price and volume data
# Shift high/low by the adjustment offset so they line up with adj_close.
dx = r.adj_close - r.close
low = r.low + dx
high = r.high + dx

# Draw the daily high-low bars; up days and down days in the same color.
deltas = np.zeros_like(prices)
deltas[1:] = np.diff(prices)
up = deltas>0
ax2.vlines(r.date[up], low[up], high[up], color='black', label='_nolegend_')
ax2.vlines(r.date[~up], low[~up], high[~up], color='black', label='_nolegend_')
ma20 = moving_average(prices, 20, type='simple')
ma200 = moving_average(prices, 200, type='simple')

linema20, = ax2.plot(r.date, ma20, color='blue', lw=2, label='MA (20)')
linema200, = ax2.plot(r.date, ma200, color='red', lw=2, label='MA (200)')

# Summary line for the most recent session.
last = r[-1]
s = '%s O:%1.2f H:%1.2f L:%1.2f C:%1.2f, V:%1.1fM Chg:%+1.2f' % (
    today.strftime('%d-%b-%Y'),
    last.open, last.high,
    last.low, last.close,
    last.volume*1e-6,
    last.close-last.open )
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)

props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='center left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)

# Dollar volume on the twin axis, scaled down so it stays near the bottom.
volume = (r.close*r.volume)/1e6  # dollar volume in millions
vmax = volume.max()
poly = ax2t.fill_between(r.date, volume, 0, label='Volume', facecolor=fillcolor, edgecolor=fillcolor)
ax2t.set_ylim(0, 5*vmax)
ax2t.set_yticks([])

### compute the MACD indicator
fillcolor = 'darkslategrey'
nslow = 26
nfast = 12
nema = 9
emaslow, emafast, macd = moving_average_convergence(prices, nslow=nslow, nfast=nfast)
ema9 = moving_average(macd, nema, type='exponential')
ax3.plot(r.date, macd, color='black', lw=2)
ax3.plot(r.date, ema9, color='blue', lw=1)
# Shade the MACD histogram (MACD minus its signal line).
ax3.fill_between(r.date, macd-ema9, 0, alpha=0.5, facecolor=fillcolor, edgecolor=fillcolor)

ax3.text(0.025, 0.95, 'MACD (%d, %d, %d)'%(nfast, nslow, nema), va='top',
         transform=ax3.transAxes, fontsize=textsize)

#ax3.set_yticks([])
# turn off upper axis tick labels, rotate the lower ones, etc
for ax in ax1, ax2, ax2t, ax3:
    if ax!=ax3:
        for label in ax.get_xticklabels():
            label.set_visible(False)
    else:
        for label in ax.get_xticklabels():
            label.set_rotation(30)
            label.set_horizontalalignment('right')

    ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
class MyLocator(mticker.MaxNLocator):
    """Pass-through subclass of :class:`matplotlib.ticker.MaxNLocator`.

    Both methods simply delegate to the parent class; the subclass exists
    only as a convenient hook for customizing tick locations later.
    """
    def __init__(self, *args, **kwargs):
        # Delegate construction unchanged to MaxNLocator.
        mticker.MaxNLocator.__init__(self, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Delegate tick computation unchanged to MaxNLocator.
        return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 5 ticks, pruning the upper and lower so they don't overlap
# with other ticks
#ax2.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
#ax3.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
ax2.yaxis.set_major_locator(MyLocator(5, prune='both'))
ax3.yaxis.set_major_locator(MyLocator(5, prune='both'))
plt.show()
| gpl-2.0 |
fermiPy/lcpipe | summary_plot.py | 1 | 4708 | ## script to plot the output
## from fermipy,given a .npy file
## Sara Buson, Oct. 2017
## very basic, need to implement better
## the reading of png, laoding the npy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import sys
from matplotlib.cbook import get_sample_data
import matplotlib.image as mpimg
def getLCdata(lc, f_scale, ts=''):
    """Read a fermipy light-curve ``.npy`` file and return its arrays.

    Parameters
    ----------
    lc : str
        Path to a ``<source>_lightcurve.npy`` file produced by fermipy.
    f_scale : float
        Flux scale (e.g. 1e-8); fluxes and upper limits are divided by it.
    ts : float or '' or None
        TS threshold separating detections from upper limits.  An empty
        string / None (or any value <= -10, as before) falls back to the
        default of 25.

    Returns
    -------
    tuple
        ``(src, mjd_middle, mjd_width, flux, flux_err, ul, npred,
        bin_qual, condition)`` where ``condition`` is True for bins whose
        TS is below the threshold and ``npred`` is zeroed in those bins.
    """
    # Default the threshold explicitly; the original ``not ts > -10`` test
    # silently failed for the '' default under Python 2 and is a TypeError
    # under Python 3.
    if ts == '' or ts is None or ts <= -10:
        ts = 25
    print(ts)
    # Source name is the file name minus the '_lightcurve.npy' suffix.
    s = lc.split('/')
    src = s[-1].split('_lightcurve.npy')[0]
    # The .npy file stores one pickled dict; allow_pickle is required with
    # numpy >= 1.16.3, where it defaults to False for security reasons.
    o = np.load(lc, allow_pickle=True).flat[0]
    _ts = o['ts']
    mjd = o['tmin_mjd']
    mjd_width = mjd[1] - mjd[0]
    # NOTE(review): adds the full bin width (not half) to the bin start;
    # this mirrors the original code -- confirm against fermipy output.
    mjd_middle = mjd + mjd_width
    flux = o['flux'] / f_scale
    flux_err = o['flux_err'] / f_scale
    ul = o['flux100_ul95'] / f_scale
    N_pred = o['npred']
    bin_qual = o['fit_quality']
    # Boolean array: True where the bin is below the TS threshold.
    condition = _ts < ts
    # Zero out predicted counts for below-threshold (upper-limit) bins.
    # (The original also built masked flux arrays y/ye that were never
    # returned; that dead code has been removed.)
    npred = [0 if condition[i] else N_pred[i] for i in range(len(mjd_middle))]
    return src, mjd_middle, mjd_width, flux, flux_err, ul, npred, bin_qual, condition
def plotLC(lc, ts='', f_scale=1e-8, where='./', save=False):
    """Build a 2x3 summary figure for one fermipy source.

    Top row: the light curve.  Bottom row: flux significance vs. Npred
    significance, the TS map image and the SED image (both read as PNGs
    from ``where``).

    Parameters
    ----------
    lc : str
        Path to the ``<source>_lightcurve.npy`` file.
    ts : float or ''
        TS threshold forwarded to :func:`getLCdata`.
    f_scale : float
        Flux scale used for the y-axis (default 1e-8).
    where : str
        Directory containing the ``*_tsmap_sqrt_ts.png`` and
        ``<source>_sed.png`` images.
    save : bool
        If True, also write ``<source>_summary.pdf``.
    """
    # Global legend styling for this figure.
    plt.rcParams['legend.handlelength'] = 2.4
    plt.rcParams['legend.numpoints'] = 1
    plt.rcParams['legend.handletextpad'] = 0.9
    plt.rcParams['legend.markerscale'] = 0
    #plt.rcParams['lines.linewidth']=0
    left = 0.075   # the left side of the subplots of the figure
    right = 0.975  # the right side of the subplots of the figure
    bottom = 0.06  # the bottom of the subplots of the figure
    top = 0.95     # the top of the subplots of the figure
    wspace = 0.08  # the amount of width reserved for blank space between subplots
    hspace = 0.3   # the amount of height reserved for white space between subplots
    grid_size = (2, 3)
    fig, axs = plt.subplots(nrows=2, ncols=3, sharex=False, figsize=(12, 8))
    """ FERMIPY LC """
    #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
    """ -- reading the LC output in separete function --- """
    # e.g. str(1e-8) == '1e-08' -> '8', used in the axis-label exponent.
    f_scale_label = str(f_scale).split('-0')[-1]
    ax0 = plt.subplot2grid(grid_size, (0, 0), rowspan=1, colspan=3)  ## <<--------
    ax0.set_ylabel('[$10^{-%s} ph cm^{-2} s^{-1}$]' % f_scale_label)
    ax0.set_xlabel('Time [MJD]')
    ax0.grid()
    src, mjd_middle, mjd_width, flux, fluxerr, ul, N_pred, bin_qual, lolims = getLCdata(lc, f_scale, ts=ts)
    # Below-threshold bins are drawn as upper limits via ``uplims=lolims``.
    plt.errorbar(mjd_middle, flux, xerr=mjd_width, yerr=fluxerr, uplims=lolims,
                 color='green', marker='o', markersize=4, ls='none', label='%s (%i-day binning; TS>%.1f)' % (src, mjd_width, ts))
    ## coming..
    ## to be included if the Sun is within the ROI
    ## plt. plot([timeSun0,timeSun0],[0,5], label='SUN',ls='dashed', c='red',linewidth=2.0)
    leg0 = ax0.legend()
    plt.legend(loc='upper left')
    ax0.axes.get_xaxis().set_visible(True)
    ## HERE PLOT NPRED // QUALITY
    ax1 = plt.subplot2grid(grid_size, (1, 0), rowspan=1, colspan=1)  ## <<--------
    ax1.set_ylabel('Flux/Flux_err')
    ax1.set_xlabel('Npred/sqrt(Npred)')
    #ax1.set_xlim(lt,rt)
    #ax1.set_ylim(-0.01,3)
    ax1.grid()
    # Flux significance vs. predicted-counts significance, bin by bin.
    ratio_F_Fe = flux / fluxerr
    ratio_Npred = N_pred / np.sqrt(N_pred)
    plt.errorbar(ratio_Npred, ratio_F_Fe, xerr=0, yerr=0, uplims=False,
                 color='orange', marker='o', markersize=4, ls='none', label='')#,xnewF, F_(xnewF),'-',xnewF1, F_(xnewF1),'-',lw=2,label='LAT',color='green')#, xnew, f2(xnew), '--')
    ## coming..
    ## to be included if the Sun is within the ROI
    ## plt. plot([timeSun0,timeSun0],[0,5], label='SUN',ls='dashed', c='red',linewidth=2.0)
    leg1 = ax1.legend()
    plt.legend(loc='upper left')
    ax1.axes.get_xaxis().set_visible(True)
    ## HERE PLOT TSMAP
    what = 'pointsource_powerlaw_2.00_tsmap_sqrt_ts.png'
    img = mpimg.imread(where + what)
    newax = plt.subplot2grid(grid_size, (1, 1), rowspan=1, colspan=1)
    imgplot = plt.imshow(img)
    newax.imshow(img)
    newax.axis('off')
    ## HERE PLOT SED
    what = '%s_sed.png' % src
    img_sed = plt.imread(where + what)
    newax = plt.subplot2grid(grid_size, (1, 2), rowspan=1, colspan=1)
    imgplot = plt.imshow(img_sed)
    newax.axis('off')
    plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
                        wspace=wspace, hspace=hspace)
    if save == True: plt.savefig('%s_summary.pdf' % src, transparent=True)
    plt.show()
if __name__ == "__main__":
    # Usage: python summary_plot.py LC_file.npy [ts_threshold]
    lcfile = sys.argv[1]
    # The TS threshold is optional; the previous ``if sys.argv[2]:`` check
    # raised IndexError when it was omitted and could leave TS unbound.
    TS = float(sys.argv[2]) if len(sys.argv) > 2 else 25.0
    plotLC(lcfile, ts=TS, save=True)
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/manifold/setup.py | 40 | 1284 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """Assemble the numpy.distutils configuration for sklearn.manifold.

    Declares the two Cython extensions (``_utils`` and the BLAS-linked
    ``_barnes_hut_tsne``) plus the ``tests`` subpackage.
    """
    cfg = Configuration("manifold", parent_package, top_path)

    # Link against libm on POSIX platforms.
    math_libs = []
    if os.name == 'posix':
        math_libs.append('m')

    cfg.add_extension("_utils",
                      sources=["_utils.pyx"],
                      include_dirs=[numpy.get_include()],
                      libraries=math_libs,
                      extra_compile_args=["-O3"])

    # Pull BLAS settings and extend the compile flags for the t-SNE kernel.
    cblas_libs, blas_info = get_blas_info()
    compile_args = blas_info.pop('extra_compile_args', [])
    compile_args.append("-O4")

    cfg.add_extension("_barnes_hut_tsne",
                      libraries=cblas_libs,
                      sources=["_barnes_hut_tsne.pyx"],
                      include_dirs=[join('..', 'src', 'cblas'),
                                    numpy.get_include(),
                                    blas_info.pop('include_dirs', [])],
                      extra_compile_args=compile_args, **blas_info)

    cfg.add_subpackage('tests')

    return cfg
if __name__ == "__main__":
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup

    config = configuration()
    setup(**config.todict())
| bsd-3-clause |
mariusvniekerk/ibis | ibis/client.py | 4 | 13243 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.compat import zip as czip
from ibis.config import options
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.sql.compiler as comp
import ibis.common as com
import ibis.util as util
class Client(object):
    """Marker base class for all Ibis backend clients."""
    pass
class Query(object):

    """
    Abstraction for DDL query execution to enable both synchronous and
    asynchronous queries, progress, cancellation and more (for backends
    supporting such functionality).
    """

    def __init__(self, client, ddl):
        # client : the backend Client that will run this query
        # ddl : either a comp.DDL object (compiled here) or a raw SQL string
        self.client = client

        if isinstance(ddl, comp.DDL):
            self.compiled_ddl = ddl.compile()
        else:
            self.compiled_ddl = ddl

        # Optional post-processing hook attached to the DDL object.
        self.result_wrapper = getattr(ddl, 'result_handler', None)

    def execute(self):
        """Run the compiled query and return the (possibly wrapped) result."""
        # synchronous by default
        with self.client._execute(self.compiled_ddl, results=True) as cur:
            result = self._fetch(cur)

        return self._wrap_result(result)

    def _wrap_result(self, result):
        # Apply the DDL-supplied result handler, if any.
        if self.result_wrapper is not None:
            result = self.result_wrapper(result)
        return result

    def _fetch(self, cursor):
        """Materialize the cursor's result set as a pandas DataFrame."""
        import pandas as pd

        rows = cursor.fetchall()
        # TODO(wesm): please evaluate/reimpl to optimize for perf/memory
        # cursor.description yields (name, db_type, ...) per column.
        dtypes = [self._db_type_to_dtype(x[1]) for x in cursor.description]
        names = [x[0] for x in cursor.description]
        cols = {}
        # czip(*rows) transposes the row tuples into per-column sequences.
        for (col, name, dtype) in czip(czip(*rows), names, dtypes):
            try:
                cols[name] = pd.Series(col, dtype=dtype)
            except TypeError:
                # coercing to specified dtype failed, e.g. NULL vals in int col
                cols[name] = pd.Series(col)
        return pd.DataFrame(cols, columns=names)

    def _db_type_to_dtype(self, db_type):
        # Backend-specific mapping from database type to numpy/pandas dtype.
        raise NotImplementedError
class AsyncQuery(Query):

    """
    Abstract asynchronous query; concrete backends override every hook.
    """

    def execute(self):
        """Begin execution without blocking; return a handle or self."""
        raise NotImplementedError

    def is_finished(self):
        """Return True when the query has completed."""
        raise NotImplementedError

    def cancel(self):
        """Cancel the running query."""
        raise NotImplementedError

    def get_result(self):
        """Block until finished and return the query result."""
        raise NotImplementedError
class SQLClient(Client):
    """Base class for clients of SQL-speaking backends.

    Concrete backends supply the connection object (``self.con``) plus the
    hooks ``_get_table_schema``, ``_get_schema_using_query``, ``_build_ast``
    and ``database_class``.
    """

    # Query classes used for synchronous / asynchronous execution; backends
    # may override either one with a specialized subclass.
    sync_query = Query
    async_query = Query

    def table(self, name, database=None):
        """
        Create a table expression that references a particular table in the
        database

        Parameters
        ----------
        name : string
        database : string, optional

        Returns
        -------
        table : TableExpr
        """
        qualified_name = self._fully_qualified_name(name, database)
        schema = self._get_table_schema(qualified_name)
        node = ops.DatabaseTable(qualified_name, schema, self)
        return self._table_expr_klass(node)

    @property
    def _table_expr_klass(self):
        # Expression type used to wrap table nodes; backends may override.
        return ir.TableExpr

    @property
    def current_database(self):
        """Name of the database the underlying connection points at."""
        return self.con.database

    def database(self, name=None):
        """
        Create a Database object for a given database name that can be used for
        exploring and manipulating the objects (tables, functions, views, etc.)
        inside

        Parameters
        ----------
        name : string
          Name of database

        Returns
        -------
        database : Database
        """
        # TODO: validate existence of database
        if name is None:
            name = self.current_database
        # NOTE: database_class is expected to be provided by subclasses.
        return self.database_class(name, self)

    def _fully_qualified_name(self, name, database):
        # XXX: default is a no-op; backends with real namespacing override it
        return name

    def _execute(self, query, results=False):
        # Run the statement; hand back the live cursor only when the caller
        # wants results, otherwise release it immediately.
        cur = self.con.execute(query)
        if results:
            return cur
        else:
            cur.release()

    def sql(self, query):
        """
        Convert a SQL query to an Ibis table expression

        Parameters
        ----------
        query : string
          A SELECT statement

        Returns
        -------
        table : TableExpr
        """
        # Get the schema by adding a LIMIT 0 on to the end of the query. If
        # there is already a limit in the query, we find and remove it
        limited_query = """\
SELECT *
FROM (
{0}
) t0
LIMIT 0""".format(query)
        schema = self._get_schema_using_query(limited_query)

        node = ops.SQLQueryResult(query, schema, self)
        return ir.TableExpr(node)

    def raw_sql(self, query, results=False):
        """
        Execute a given query string. Could have unexpected results if the
        query modifies the behavior of the session in a way unknown to Ibis; be
        careful.

        Parameters
        ----------
        query : string
          SQL or DDL statement
        results : boolean, default False
          Pass True if the query as a result set

        Returns
        -------
        cur : ImpalaCursor if results=True, None otherwise
          You must call cur.release() after you are finished using the cursor.
        """
        return self._execute(query, results=results)

    def execute(self, expr, params=None, limit='default', async_=False):
        """
        Compile and execute Ibis expression using this backend client
        interface, returning results in-memory in the appropriate object type

        Parameters
        ----------
        expr : Expr
        limit : int, default None
          For expressions yielding result sets; retrieve at most this number
          of values/rows. Overrides any limit already set on the expression.
        params : not yet implemented
        async_ : boolean, default False
          Renamed from ``async``, which became a reserved keyword in
          Python 3.7 and made this module unimportable.

        Returns
        -------
        output : input type dependent
          Table expressions: pandas.DataFrame
          Array expressions: pandas.Series
          Scalar expressions: Python scalar value
        """
        ast = self._build_ast_ensure_limit(expr, limit)

        if len(ast.queries) > 1:
            raise NotImplementedError
        else:
            return self._execute_query(ast.queries[0], async_=async_)

    def _execute_query(self, ddl, async_=False):
        # Choose the sync or async query class and run the compiled DDL.
        klass = self.async_query if async_ else self.sync_query
        return klass(self, ddl).execute()

    def compile(self, expr, params=None, limit=None):
        """
        Translate expression to one or more queries according to backend target

        Returns
        -------
        output : single query or list of queries
        """
        ast = self._build_ast_ensure_limit(expr, limit)
        queries = [query.compile() for query in ast.queries]
        return queries[0] if len(queries) == 1 else queries

    def _build_ast_ensure_limit(self, expr, limit):
        """Build the query AST, applying the default row limit if needed."""
        ast = self._build_ast(expr)
        # note: limit can still be None at this point, if the global
        # default_limit is None
        for query in reversed(ast.queries):
            if (isinstance(query, comp.Select) and
                    not isinstance(expr, ir.ScalarExpr) and
                    query.table_set is not None):
                if query.limit is None:
                    # No limit present: fall back to the requested or global
                    # default limit.
                    if limit == 'default':
                        query_limit = options.sql.default_limit
                    else:
                        query_limit = limit
                    if query_limit:
                        query.limit = {
                            'n': query_limit,
                            'offset': 0
                        }
                elif limit is not None and limit != 'default':
                    # An explicit limit overrides the existing one, keeping
                    # the existing offset.
                    query.limit = {'n': limit,
                                   'offset': query.limit['offset']}
        return ast

    def explain(self, expr):
        """
        Query for and return the query plan associated with the indicated
        expression or SQL query.

        Returns
        -------
        plan : string
        """
        if isinstance(expr, ir.Expr):
            ast = self._build_ast(expr)
            if len(ast.queries) > 1:
                raise Exception('Multi-query expression')

            query = ast.queries[0].compile()
        else:
            query = expr

        statement = 'EXPLAIN {0}'.format(query)
        with self._execute(statement, results=True) as cur:
            result = self._get_list(cur)

        return 'Query:\n{0}\n\n{1}'.format(util.indent(query, 2),
                                           '\n'.join(result))

    def _build_ast(self, expr):
        # Implement in clients
        raise NotImplementedError
class QueryPipeline(object):
    """
    Execute a series of queries, possibly asynchronously, and capture any
    result sets generated

    Note: No query pipelines have yet been implemented; this is a placeholder
    for future backend support.
    """
    pass
def execute(expr, limit='default', async_=False):
    """Execute an expression against the backend it references.

    Parameters
    ----------
    expr : Expr
    limit : int or 'default'
        Maximum number of rows for result-set expressions.
    async_ : boolean, default False
        Renamed from ``async``, which became a reserved keyword in
        Python 3.7.
    """
    backend = find_backend(expr)
    # Positional call (expr, params, limit, flag) stays compatible with
    # backend execute() implementations that still name the flag ``async``.
    return backend.execute(expr, None, limit, async_)
def compile(expr, limit=None):
    """Translate an expression into one or more backend queries.

    NOTE: intentionally shadows the ``compile`` builtin within this module;
    the name is part of the public ibis API.
    """
    backend = find_backend(expr)
    return backend.compile(expr, limit=limit)
def find_backend(expr):
    """Locate the single backend Client an expression tree depends on.

    Walks the expression graph collecting every Client referenced.  Raises
    ValueError when more than one distinct backend is found; falls back to
    ``options.default_backend`` (or raises IbisError) when none is found.
    """
    seen = []
    # Iterative depth-first traversal of the expression graph.
    stack = [expr]
    while stack:
        node = stack.pop().op()
        for arg in node.flat_args():
            if isinstance(arg, Client):
                seen.append(arg)
            elif isinstance(arg, ir.Expr):
                stack.append(arg)

    # De-duplicate by object identity.
    seen = util.unique_by_key(seen, id)

    if not seen:
        default = options.default_backend
        if default is None:
            raise com.IbisError('Expression depends on no backends, '
                                'and found no default')
        return default
    if len(seen) > 1:
        raise ValueError('Multiple backends found')
    return seen[0]
class Database(object):
    """Handle for exploring and manipulating the objects inside a database.

    Tables can be accessed as attributes (``db.my_table``) or items
    (``db['my_table']``) in addition to the explicit :meth:`table` call.
    """

    def __init__(self, name, client):
        # name : database name; client : owning backend Client
        self.name = name
        self.client = client

    def __repr__(self):
        return "{0}('{1}')".format('Database', self.name)

    def __dir__(self):
        # Expose table names (unqualified) alongside regular attributes so
        # interactive tab-completion works.
        attrs = dir(type(self))
        unqualified_tables = [self._unqualify(x) for x in self.tables]
        return list(sorted(set(attrs + unqualified_tables)))

    def __contains__(self, key):
        return key in self.tables

    @property
    def tables(self):
        # List of table names in this database.
        return self.list_tables()

    def __getitem__(self, key):
        return self.table(key)

    def __getattr__(self, key):
        # Attributes that IPython / traitlets probe for; raising here avoids
        # spurious backend lookups during interactive introspection.
        special_attrs = ['_ipython_display_', 'trait_names',
                         '_getAttributeNames']
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            if key in special_attrs:
                raise
            # Fall back to treating the attribute name as a table name.
            return self.table(key)

    def _qualify(self, value):
        # Hook for namespaces; the base class applies no prefix.
        return value

    def _unqualify(self, value):
        # Hook for namespaces; the base class strips no prefix.
        return value

    def drop(self, force=False):
        """
        Drop the database

        Parameters
        ----------
        force : boolean, default False
          Drop any objects if they exist, and do not fail if the database does
          not exist
        """
        self.client.drop_database(self.name, force=force)

    def namespace(self, ns):
        """
        Creates a derived Database instance for collections of objects having a
        common prefix. For example, for tables fooa, foob, and fooc, creating
        the "foo" namespace would enable you to reference those objects as a,
        b, and c, respectively.

        Returns
        -------
        ns : DatabaseNamespace
        """
        return DatabaseNamespace(self, ns)

    def table(self, name):
        """
        Return a table expression referencing a table in this database

        Returns
        -------
        table : TableExpr
        """
        qualified_name = self._qualify(name)
        return self.client.table(qualified_name, self.name)

    def list_tables(self, like=None):
        # Delegate to the client, qualifying the pattern for any namespace.
        return self.client.list_tables(like=self._qualify_like(like),
                                       database=self.name)

    def _qualify_like(self, like):
        # Hook for namespaces; the base class passes the pattern through.
        return like
class DatabaseNamespace(Database):
    """Prefix-restricted view onto a parent Database.

    Lets objects sharing a common name prefix (the namespace) be addressed
    by their suffix alone; all lookups are delegated to the parent.
    """

    def __init__(self, parent, namespace):
        self.parent = parent
        self.namespace = namespace

    def __repr__(self):
        template = "{0}(database={1!r}, namespace={2!r})"
        return template.format('DatabaseNamespace', self.name, self.namespace)

    @property
    def client(self):
        # Share the parent's backend client.
        return self.parent.client

    @property
    def name(self):
        # The namespace lives inside the parent's database.
        return self.parent.name

    def _qualify(self, value):
        # Prepend the namespace prefix.
        return self.namespace + value

    def _unqualify(self, value):
        # Strip the first occurrence of the namespace prefix.
        return value.replace(self.namespace, '', 1)

    def _qualify_like(self, like):
        # With no pattern, match everything under the namespace.
        return self.namespace + like if like else '{0}*'.format(self.namespace)
class DatabaseEntity(object):
    """Marker base class for objects that live inside a database."""
    pass
class View(DatabaseEntity):
    """A database view."""

    def drop(self):
        # Placeholder: concrete backends are expected to implement this.
        pass
| apache-2.0 |
sorgerlab/belpy | indra/belief/wm_scorer.py | 1 | 2125 | from io import StringIO
import copy
import pandas
import requests
from . import SimpleScorer, BayesianScorer
# Default Bayesian prior counts ([correct, incorrect]) per reader, used
# when no explicit priors are supplied to get_eidos_bayesian_scorer.
default_priors = {'hume': [13, 7], 'cwms': [13, 7], 'sofia': [13, 7]}
def load_eidos_curation_table():
    """Return a pandas table of Eidos curation data.

    Returns
    -------
    pandas.DataFrame
        Per-rule curation statistics with columns including RULE,
        Num correct, Num incorrect, COUNT of RULE and % correct.
    """
    url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
        'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \
        'rule_summary.tsv'
    # Load the table of scores from the URL above into a data frame
    res = StringIO(requests.get(url).text)
    # pandas.read_table is deprecated; read_csv with an explicit tab
    # separator is its documented replacement and behaves identically here.
    table = pandas.read_csv(res, sep='\t')
    # Drop the last "Grand total" summary row
    table = table.drop(table.index[-1])
    return table
def get_eidos_bayesian_scorer(prior_counts=None):
    """Return a BayesianScorer built from the Eidos curation counts.

    Parameters
    ----------
    prior_counts : dict, optional
        Mapping of reader name to [correct, incorrect] prior counts.  When
        falsy, a deep copy of the module-level defaults is used so that the
        shared defaults are never mutated.
    """
    curations = load_eidos_curation_table()
    # Per-rule [correct, incorrect] counts for the Eidos reader.
    rule_counts = {}
    for rule, n_corr, n_incorr in zip(curations['RULE'],
                                      curations['Num correct'],
                                      curations['Num incorrect']):
        rule_counts[rule] = [n_corr, n_incorr]
    if not prior_counts:
        prior_counts = copy.deepcopy(default_priors)
    return BayesianScorer(prior_counts=prior_counts,
                          subtype_counts={'eidos': rule_counts})
def get_eidos_scorer():
    """Return a SimpleScorer based on Eidos curated precision estimates."""
    curations = load_eidos_curation_table()
    counts = curations['COUNT of RULE']
    # Overall precision is the count-weighted mean of per-rule precision.
    overall_precision = counts.dot(curations['% correct']) / counts.sum()
    # Split the overall error into systematic and random components, for
    # now in an ad-hoc manner.
    systematic = 0.05
    random_err = 1 - overall_precision - systematic
    prior_probs = {'rand': {'eidos': random_err},
                   'syst': {'eidos': systematic}}
    # Per-rule random error, capping per-rule precision at 0.95.
    rule_errors = {}
    for rule, prec in zip(curations['RULE'], curations['% correct']):
        rule_errors[rule] = 1.0 - min(prec, 0.95) - systematic
    return SimpleScorer(prior_probs, {'eidos': rule_errors})
| mit |
vibhorag/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
    """Common machinery shared by all scorer objects: stores the metric
    function, the sign (+1 for scores, -1 for losses) and extra keyword
    arguments, and renders a make_scorer-style repr."""

    def __init__(self, score_func, sign, kwargs):
        self._score_func = score_func
        self._sign = sign
        self._kwargs = kwargs

    @abstractmethod
    def __call__(self, estimator, X, y, sample_weight=None):
        """Score ``estimator`` on ``X`` against ``y``; subclasses decide
        which prediction method of the estimator to use."""
        pass

    def __repr__(self):
        # Render the extra keyword arguments, then assemble the factory call.
        extra = "".join(", %s=%s" % (str(name), str(value))
                        for name, value in self._kwargs.items())
        sign_part = "" if self._sign > 0 else ", greater_is_better=False"
        return ("make_scorer(%s%s%s%s)"
                % (self._score_func.__name__, sign_part,
                   self._factory_args(), extra))

    def _factory_args(self):
        """Return non-default make_scorer arguments for repr."""
        return ""
class _PredictScorer(_BaseScorer):
    """Scorer that evaluates the output of ``estimator.predict``."""

    def __call__(self, estimator, X, y_true, sample_weight=None):
        """Evaluate predicted target values for X relative to y_true.

        Parameters
        ----------
        estimator : object
            Trained estimator whose ``predict`` output is scored.

        X : array-like or sparse matrix
            Test data fed to ``estimator.predict``.

        y_true : array-like
            Gold standard target values for X.

        sample_weight : array-like, optional (default=None)
            Per-sample weights forwarded to the score function.

        Returns
        -------
        score : float
            Signed score of the predictions.
        """
        predictions = estimator.predict(X)
        if sample_weight is None:
            return self._sign * self._score_func(y_true, predictions,
                                                 **self._kwargs)
        return self._sign * self._score_func(y_true, predictions,
                                             sample_weight=sample_weight,
                                             **self._kwargs)
class _ProbaScorer(_BaseScorer):
    """Scorer that evaluates the output of ``clf.predict_proba``."""

    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate predicted class probabilities for X against y.

        Parameters
        ----------
        clf : object
            Trained classifier; must expose a ``predict_proba`` method.

        X : array-like or sparse matrix
            Test data fed to ``clf.predict_proba``.

        y : array-like
            Gold standard class labels (not probabilities) for X.

        sample_weight : array-like, optional (default=None)
            Per-sample weights forwarded to the score function.

        Returns
        -------
        score : float
            Signed score of the probability predictions.
        """
        probabilities = clf.predict_proba(X)
        if sample_weight is None:
            return self._sign * self._score_func(y, probabilities,
                                                 **self._kwargs)
        return self._sign * self._score_func(y, probabilities,
                                             sample_weight=sample_weight,
                                             **self._kwargs)

    def _factory_args(self):
        return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
    """Scorer operating on continuous decision certainties (decision
    function or probabilities) rather than hard predictions."""

    def __call__(self, clf, X, y, sample_weight=None):
        """Evaluate decision function output for X relative to y_true.

        Parameters
        ----------
        clf : object
            Trained classifier to use for scoring. Must have either a
            decision_function method or a predict_proba method; the output of
            that is used to compute the score.

        X : array-like or sparse matrix
            Test data that will be fed to clf.decision_function or
            clf.predict_proba.

        y : array-like
            Gold standard target values for X. These must be class labels,
            not decision function values.

        sample_weight : array-like, optional (default=None)
            Sample weights.

        Returns
        -------
        score : float
            Score function applied to prediction of estimator on X.
        """
        y_type = type_of_target(y)
        if y_type not in ("binary", "multilabel-indicator"):
            raise ValueError("{0} format is not supported".format(y_type))

        if is_regressor(clf):
            # Regressors are scored directly on their predictions.
            y_pred = clf.predict(X)
        else:
            try:
                y_pred = clf.decision_function(X)

                # For multi-output multi-class estimator
                # NOTE(review): vstack over a generator is deprecated in
                # newer numpy -- confirm before upgrading.
                if isinstance(y_pred, list):
                    y_pred = np.vstack(p for p in y_pred).T

            except (NotImplementedError, AttributeError):
                # Fall back to probabilities when no decision function exists.
                y_pred = clf.predict_proba(X)

                if y_type == "binary":
                    # Keep only the positive-class column.
                    y_pred = y_pred[:, 1]
                elif isinstance(y_pred, list):
                    y_pred = np.vstack([p[:, -1] for p in y_pred]).T

        if sample_weight is not None:
            return self._sign * self._score_func(y, y_pred,
                                                 sample_weight=sample_weight,
                                                 **self._kwargs)
        else:
            return self._sign * self._score_func(y, y_pred, **self._kwargs)

    def _factory_args(self):
        return ", needs_threshold=True"
def get_scorer(scoring):
    """Resolve *scoring* into a scorer callable.

    Strings are looked up in the SCORERS registry; any non-string value is
    assumed to already be a scorer and is returned unchanged.
    """
    if not isinstance(scoring, six.string_types):
        return scoring
    try:
        return SCORERS[scoring]
    except KeyError:
        raise ValueError('%r is not a valid scoring value. '
                         'Valid options are %s'
                         % (scoring, sorted(SCORERS.keys())))
def _passthrough_scorer(estimator, *args, **kwargs):
    """Function that wraps estimator.score, forwarding all arguments."""
    return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
    """Determine scorer from user options.

    A TypeError will be thrown if the estimator cannot be scored.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    allow_none : boolean, optional, default: False
        If no scoring is specified and the estimator has no score function, we
        can either return None or raise an exception.

    Returns
    -------
    scoring : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    """
    # Guard clause: anything without a fit method is not an estimator.
    if not hasattr(estimator, 'fit'):
        # Error message fixed: previously read "should a be an estimator".
        raise TypeError("estimator should be an estimator implementing "
                        "'fit' method, %r was passed" % estimator)
    if scoring is not None:
        return get_scorer(scoring)
    if hasattr(estimator, 'score'):
        # Fall back to the estimator's own score method.
        return _passthrough_scorer
    if allow_none:
        return None
    raise TypeError(
        "If no scoring is specified, the estimator passed should "
        "have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
                needs_threshold=False, **kwargs):
    """Build a scorer callable around a performance metric or loss function.

    Wraps scoring functions such as ``accuracy_score``,
    ``mean_squared_error`` or ``average_precision`` for use in
    GridSearchCV and cross_val_score, returning a callable that scores an
    estimator's output.

    Read more in the :ref:`User Guide <scoring>`.

    Parameters
    ----------
    score_func : callable
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    greater_is_better : boolean, default=True
        True when higher values of ``score_func`` are better (a score);
        False for loss functions, in which case the scorer sign-flips the
        outcome so that greater is always better.

    needs_proba : boolean, default=False
        Whether ``score_func`` requires ``predict_proba`` probability
        estimates from a classifier.

    needs_threshold : boolean, default=False
        Whether ``score_func`` takes continuous decision certainties
        (decision function or probabilities); only meaningful for binary
        classification. Needed e.g. for ``average_precision`` or the area
        under the ROC curve.

    **kwargs : additional arguments
        Additional parameters to be passed to ``score_func``.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, beta=2)
    >>> from sklearn.grid_search import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
    ...                     scoring=ftwo_scorer)
    """
    # The two prediction modes are mutually exclusive.
    if needs_proba and needs_threshold:
        raise ValueError("Set either needs_proba or needs_threshold to True,"
                         " but not both.")
    sign = 1 if greater_is_better else -1
    # Select the scorer flavor matching the requested prediction mode.
    if needs_proba:
        scorer_cls = _ProbaScorer
    elif needs_threshold:
        scorer_cls = _ThresholdScorer
    else:
        scorer_cls = _PredictScorer
    return scorer_cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
# Error metrics are losses: lower is better, hence greater_is_better=False
# (the scorer sign-flips them so that greater is always better).
mean_squared_error_scorer = make_scorer(mean_squared_error,
                                        greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
                                         greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
                                           greater_is_better=False)

# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)

# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
                             needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
                                       needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)

# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
                              needs_proba=True)

# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)

# Registry mapping scoring-name strings to scorer objects (see get_scorer).
SCORERS = dict(r2=r2_scorer,
               median_absolute_error=median_absolute_error_scorer,
               mean_absolute_error=mean_absolute_error_scorer,
               mean_squared_error=mean_squared_error_scorer,
               accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
               average_precision=average_precision_scorer,
               log_loss=log_loss_scorer,
               adjusted_rand_score=adjusted_rand_scorer)

# Register the multiclass metrics plus their averaged variants
# (e.g. 'f1_macro', 'precision_weighted').
for name, metric in [('precision', precision_score),
                     ('recall', recall_score), ('f1', f1_score)]:
    SCORERS[name] = make_scorer(metric)
    for average in ['macro', 'micro', 'samples', 'weighted']:
        qualified_name = '{0}_{1}'.format(name, average)
        SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
                                                      average=average))
| bsd-3-clause |
dnjohnstone/hyperspy | hyperspy/tests/drawing/test_utils.py | 1 | 1293 | # Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest.mock import Mock
import matplotlib
import pytest
import hyperspy.drawing.utils as utils
def test_create_figure():
    """create_figure should return a matplotlib Figure wired to the
    on-close callback, and the callback should fire exactly once when the
    figure window is closed."""
    if matplotlib.get_backend() == "agg":
        # The agg backend never emits window-close events.
        pytest.xfail("{} backend does not support on_close event.".format(
            matplotlib.get_backend()))
    dummy_function = Mock()
    fig = utils.create_figure(window_title="test title",
                              _on_figure_window_close=dummy_function)
    # Plain assert; comparing to True with == is a flake8 E712 anti-idiom.
    assert isinstance(fig, matplotlib.figure.Figure)
    matplotlib.pyplot.close(fig)
    dummy_function.assert_called_once_with()
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 18046 | import numpy as np
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
    """Exercise the full SVD solver over every valid component count and
    check transform consistency, covariance/precision inversion and that
    all components explain all of the variance."""
    # PCA on dense arrays
    X = iris.data

    for n_comp in np.arange(X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='full')

        # fit().transform() and fit_transform() must agree.
        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        # (their product must be the identity).
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    # test explained_variance_ratio_ == 1 with all components
    pca = PCA(svd_solver='full')
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
    """ARPACK-solver PCA: interior n_components work; the extremes raise.

    ARPACK cannot compute 0 or all n_features singular vectors, so those
    values must raise ValueError while leaving the constructor-time
    parameters untouched.
    """
    # PCA on dense arrays
    X = iris.data
    d = X.shape[1]
    # Loop excluding the extremes, invalid inputs for arpack
    for n_comp in np.arange(1, d):
        pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)
        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)
        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)
        # get_covariance and get_precision must be (numerical) inverses.
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(d), 12)
    # n_components == 0 is invalid for arpack and must raise.
    pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # Check internal state: the failed fit must not mutate the parameters.
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).svd_solver)
    # n_components == n_features is also invalid for arpack.
    pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    assert_equal(pca.n_components,
                 PCA(n_components=d,
                     svd_solver='arpack', random_state=0).n_components)
    # FIX: this comparison previously constructed PCA(n_components=0) by
    # copy-paste; use d to match the estimator under test (the svd_solver
    # attribute itself is independent of n_components, so behavior is same).
    assert_equal(pca.svd_solver,
                 PCA(n_components=d,
                     svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
    """Randomized-solver PCA on dense arrays; n_components == 0 must raise."""
    # PCA on dense arrays
    X = iris.data
    # Loop excluding the 0, invalid for randomized
    for n_comp in np.arange(1, X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)
        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)
        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)
        # get_covariance and get_precision must be (numerical) inverses.
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
    # n_components == 0 is invalid for the randomized solver and must raise.
    # (FIX: a literal duplicate of this construct/assert_raises pair was
    # removed; it added nothing to the test.)
    pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # Check internal state: the failed fit must not mutate the parameters.
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
    """Regression test: fitting with n_features > n_components must not emit
    numpy empty-slice RuntimeWarnings (observed in scikit-learn 0.16)."""
    # test if we avoid numpy warnings for computing over empty arrays
    n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
    X = np.random.uniform(-1, 1, size=(n_components, n_features))
    pca = PCA(n_components=n_components)
    assert_no_warnings(pca.fit, X)
def test_whitening():
    """Whitened PCA output must have unit variance and zero mean for every
    solver/copy combination; unwhitened output keeps a varying variance."""
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50
    # some low rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3
    assert_equal(X.shape, (n_samples, n_features))
    # the component-wise variance is thus highly varying:
    assert_greater(X.std(axis=0).std(), 43.8)
    for solver, copy in product(solver_list, (True, False)):
        # whiten the data while projecting to the lower dim subspace
        X_ = X.copy()  # make sure we keep an original across iterations.
        pca = PCA(n_components=n_components, whiten=True, copy=copy,
                  svd_solver=solver, random_state=0, iterated_power=7)
        # fit_transform and transform must agree
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
                            decimal=6)
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
        X_ = X.copy()
        pca = PCA(n_components=n_components, whiten=False, copy=copy,
                  svd_solver=solver).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))
        # in that case the output components still have varying variances
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
        # we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    """explained_variance_ / _ratio_ must agree across solvers and match the
    empirical variance of the transformed data (looser tolerances for the
    approximate arpack/randomized solvers)."""
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=2, svd_solver='full').fit(X)
    apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              apca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              apca.explained_variance_ratio_, 3)
    rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              rpca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 1)
    # compare to empirical variances
    X_pca = pca.transform(X)
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(X_pca, axis=0))
    X_pca = apca.transform(X)
    assert_array_almost_equal(apca.explained_variance_,
                              np.var(X_pca, axis=0))
    X_rpca = rpca.transform(X)
    assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
                              decimal=1)
    # Same with correlated data
    X = datasets.make_classification(n_samples, n_features,
                                     n_informative=n_features-2,
                                     random_state=rng)[0]
    pca = PCA(n_components=2).fit(X)
    rpca = PCA(n_components=2, svd_solver='randomized',
               random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
    """A point from the offset cluster must project onto the first component
    with absolute weight ~1, for every solver."""
    # Test that the projection of data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    for solver in solver_list:
        Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
        Yt /= np.sqrt((Yt ** 2).sum())
        assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
    """inverse_transform(transform(X)) must recover X when the data is
    (almost) of rank n_components, with and without whitening."""
    # Test that the projection of data can be inverted
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver='full').fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
    # same as above with whitening (approximate reconstruction)
    for solver in solver_list:
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        Y = pca.transform(X)
        Y_inverse = pca.inverse_transform(Y)
        assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
    """Out-of-range n_components (negative, or > n_features) must raise
    ValueError at fit time for every solver."""
    data = [[0, 1], [1, 0]]
    # product() visits the same (solver, n_components) pairs, in the same
    # order, as the original nested loops.
    for solver, bad_n in product(solver_list, (-1, 3)):
        estimator = PCA(bad_n, svd_solver=solver)
        assert_raises(ValueError, estimator.fit, data)
def test_randomized_pca_check_projection():
    """Projection sanity check (weight ~1 on the first component) for the
    randomized solver specifically."""
    # Test that the projection by randomized PCA on dense data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = PCA(n_components=2, svd_solver='randomized',
             random_state=0).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
    """Randomized PCA must accept plain Python lists as input."""
    # Test that the projection by randomized PCA on list data is correct
    X = [[1.0, 0.0], [0.0, 1.0]]
    X_transformed = PCA(n_components=1, svd_solver='randomized',
                        random_state=0).fit(X).transform(X)
    assert_equal(X_transformed.shape, (2, 1))
    assert_almost_equal(X_transformed.mean(), 0.00, 2)
    assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    """Randomized-PCA round trip: close reconstruction without whitening,
    and within 1e-5 relative error with whitening."""
    # Test that randomized PCA is inversible on dense data
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed signal
    # (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=2)
    # same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
              random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
    assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
    """n_components='mle' must infer exactly one informative dimension for
    rank-one-plus-noise data."""
    # Check automated dimensionality setting
    rng = np.random.RandomState(0)
    n, p = 100, 5
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    pca = PCA(n_components='mle', svd_solver='full').fit(X)
    assert_equal(pca.n_components, 'mle')
    assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
    """_assess_dimension_'s log-likelihood must peak near k == 1 for data
    that is a rank-one signal plus isotropic noise."""
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
         np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    # Likelihood of each candidate dimensionality, evaluated in one pass.
    ll = np.array([_assess_dimension_(spect, k, n, p) for k in range(p)])
    assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    """With two shifted clusters in the data, _infer_dimension_ must report
    more than one informative dimension."""
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
    """With three shifted clusters, _infer_dimension_ must report more than
    two informative dimensions."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
    """A float n_components in (0, 1) selects the smallest dimensionality
    whose cumulative explained-variance ratio reaches that fraction."""
    X = iris.data
    pca = PCA(n_components=0.95, svd_solver='full')
    pca.fit(X)
    assert_equal(pca.n_components, 0.95)
    assert_equal(pca.n_components_, 2)
    pca = PCA(n_components=0.01, svd_solver='full')
    pca.fit(X)
    assert_equal(pca.n_components, 0.01)
    assert_equal(pca.n_components_, 1)
    rng = np.random.RandomState(0)
    # more features than samples
    X = rng.rand(5, 20)
    pca = PCA(n_components=.5, svd_solver='full').fit(X)
    assert_equal(pca.n_components, 0.5)
    assert_equal(pca.n_components_, 2)
def test_pca_score():
    """Probabilistic-PCA log-likelihood should match the analytic entropy of
    the generating isotropic Gaussian, for every solver."""
    # Test that probabilistic PCA scoring yields a reasonable score
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        ll1 = pca.score(X)
        # Differential entropy of an isotropic Gaussian with sigma = 0.1.
        h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
        np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    """score() must be higher on data drawn from the fitted distribution
    than on broader data, and must change when whiten=True."""
    # Test that probabilistic PCA correctly separated different datasets
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        ll1 = pca.score(X)
        ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
        assert_greater(ll1, ll2)
        # Test that it gives different scores if whiten=True
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        ll2 = pca.score(X)
        assert_true(ll1 > ll2)
def test_pca_score3():
    """Model selection by held-out likelihood must pick the true rank (1)."""
    # Check that probabilistic PCA selects the right model
    n, p = 200, 3
    rng = np.random.RandomState(0)
    # Train/test sets drawn from the same rank-one-plus-noise distribution.
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k, svd_solver='full')
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
    """svd_solver='auto' must dispatch to 'full' or 'randomized' depending
    on n_components and data shape, matching the explicit solver exactly."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(1000, 50))
    # case: n_components in (0,1) => 'full'
    pca = PCA(n_components=.5)
    pca.fit(X)
    pca_test = PCA(n_components=.5, svd_solver='full')
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)
    # case: max(X.shape) <= 500 => 'full'
    pca = PCA(n_components=5, random_state=0)
    Y = X[:10, :]
    pca.fit(Y)
    pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
    pca_test.fit(Y)
    assert_array_almost_equal(pca.components_, pca_test.components_)
    # case: n_components >= .8 * min(X.shape) => 'full'
    pca = PCA(n_components=50)
    pca.fit(X)
    pca_test = PCA(n_components=50, svd_solver='full')
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)
    # n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
    pca = PCA(n_components=10, random_state=0)
    pca.fit(X)
    pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
    pca_test.fit(X)
    assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
    """RandomizedPCA must emit its DeprecationWarning and agree numerically
    with PCA(svd_solver='randomized')."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA will be "
                    "removed in 0.20. Use PCA(svd_solver='randomized') "
                    "instead. The new implementation DOES NOT store "
                    "whiten components_. Apply transform to get them.")
    # Capture the transform on a local dict instead of mutating a module-level
    # global (the previous implementation used ``global Y``).
    results = {}

    def fit_deprecated(X):
        rpca = RandomizedPCA(random_state=0)
        results['Y'] = rpca.fit_transform(X)

    assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
    Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
    assert_array_almost_equal(results['Y'], Y_pca)
| bsd-3-clause |
miguelfrde/stanford-cs231n | assignment1/cs231n/classifiers/neural_net.py | 1 | 10841 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange
class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network. The net has an input dimension of
    D, a hidden layer dimension of H, and performs classification over C classes.
    We train the network with a softmax loss function and L2 regularization on the
    weight matrices. The network uses a ReLU nonlinearity after the first fully
    connected layer.
    In other words, the network has the following architecture:
    input - fully connected layer - ReLU - fully connected layer - softmax
    The outputs of the second fully-connected layer are the scores for each class.
    """
    def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        """
        Initialize the model. Weights are initialized to small random values and
        biases are initialized to zero. Weights and biases are stored in the
        variable self.params, which is a dictionary with the following keys:
        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)
        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        - std: Scale of the Gaussian used to initialize the weights.
        """
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def loss(self, X, y=None, reg=0.0):
        """
        Compute the loss and gradients for a two layer fully connected neural
        network.
        Inputs:
        - X: Input data of shape (N, D). Each X[i] is a training sample.
        - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
          an integer in the range 0 <= y[i] < C. This parameter is optional; if it
          is not passed then we only return scores, and if it is passed then we
          instead return the loss and gradients.
        - reg: Regularization strength.
        Returns:
        If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
        the score for class c on input X[i].
        If y is not None, instead return a tuple of:
        - loss: Loss (data loss and regularization loss) for this batch of training
          samples.
        - grads: Dictionary mapping parameter names to gradients of those parameters
          with respect to the loss function; has the same keys as self.params.
        """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape
        # Forward pass: affine -> ReLU -> affine gives class scores (N, C).
        scores = None
        hidden1 = np.maximum(0, X.dot(W1) + b1)
        scores = hidden1.dot(W2) + b2
        # If the targets are not given then jump out, we're done
        if y is None:
            return scores
        # Softmax loss, computed in a numerically stable way by shifting each
        # row of scores by its maximum before exponentiating.
        loss = None
        shift_scores = scores - np.max(scores, axis=1).reshape(-1, 1)
        s = np.exp(shift_scores) / np.sum(np.exp(shift_scores), axis=1).reshape(-1, 1)
        loss = np.sum(-np.log(s[range(N), y]))
        # Mean data loss plus L2 penalty on both weight matrices.
        loss = loss/N + 0.5*reg*(np.sum(W1 * W1) + np.sum(W2 * W2))
        # Backward pass: gradient of softmax w.r.t. scores is (p - 1{y}) / N,
        # then backprop through the second affine, the ReLU gate, and the
        # first affine layer.
        grads = {}
        indices = np.zeros(s.shape)
        indices[range(N), y] = 1
        d_scores = (s - indices)/N
        d_hidden = d_scores.dot(W2.T)
        # ReLU gate: gradients are zeroed where the activation was clipped.
        d_hidden = (hidden1 > 0) * d_hidden
        grads['W2'] = np.dot(hidden1.T, d_scores) + reg * W2
        grads['b2'] = np.sum(d_scores, axis=0)
        grads['W1'] = X.T.dot(d_hidden) + reg*W1
        grads['b1'] = np.sum(d_hidden, axis=0)
        return loss, grads
    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=5e-6, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.
        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array f shape (N,) giving training labels; y[i] = c means that
          X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        Returns a dict of loss / train-accuracy / val-accuracy histories.
        """
        num_train = X.shape[0]
        # NOTE(review): true division makes this a float; ``it %
        # iterations_per_epoch == 0`` below then only matches at it == 0 when
        # num_train is not a multiple of batch_size -- confirm whether integer
        # division (//) was intended for the epoch boundary.
        iterations_per_epoch = max(num_train / batch_size, 1)
        # Use SGD to optimize the parameters in self.model
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for it in xrange(num_iters):
            X_batch = None
            y_batch = None
            # Sample a minibatch (with replacement) of examples and labels.
            indices = np.random.choice(num_train, batch_size, replace=True)
            X_batch = X[indices]
            y_batch = y[indices]
            # Compute loss and gradients using the current minibatch
            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
            loss_history.append(loss)
            # Vanilla SGD update on every parameter.
            self.params['W1'] -= learning_rate * grads['W1']
            self.params['W2'] -= learning_rate * grads['W2']
            self.params['b1'] -= learning_rate * grads['b1']
            self.params['b2'] -= learning_rate * grads['b2']
            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
            # Every epoch, check train and val accuracy and decay learning rate.
            if it % iterations_per_epoch == 0:
                # Check accuracy
                train_acc = (self.predict(X_batch) == y_batch).mean()
                val_acc = (self.predict(X_val) == y_val).mean()
                train_acc_history.append(train_acc)
                val_acc_history.append(val_acc)
                # Decay learning rate
                learning_rate *= learning_rate_decay
        return {
            'loss_history': loss_history,
            'train_acc_history': train_acc_history,
            'val_acc_history': val_acc_history,
        }
    def predict(self, X):
        """
        Use the trained weights of this two-layer network to predict labels for
        data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.
        Inputs:
        - X: A numpy array of shape (N, D) giving N D-dimensional data points to
          classify.
        Returns:
        - y_pred: A numpy array of shape (N,) giving predicted labels for each of
          the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
          to have class c, where 0 <= c < C.
        """
        y_pred = None
        # Re-run the forward pass and take the argmax over class scores.
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        hidden1 = np.maximum(0, X.dot(W1) + b1)
        y_pred = np.argmax(hidden1.dot(W2) + b2, axis=1)
        return y_pred
| mit |
ZobairAlijan/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
# Mongo collection holding OSF user documents.
user_collection = database['user']
# All figures produced by this script land under analytics/figs/features.
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
    """Plot a histogram of registration dates for users who were invited by
    email (users with non-empty unclaimed_records) and save it to FIG_PATH.

    Does nothing when no invited users exist.
    """
    invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
    dates_invited = [
        user['date_registered']
        for user in invited
    ]
    if not dates_invited:
        return
    # plot_dates draws on the current pyplot figure; the previous unused
    # ``fig =`` binding was dropped.
    plot_dates(dates_invited)
    # FIX: title format was "({}) total)" -- unbalanced parenthesis typo.
    plt.title('email invitations ({} total)'.format(len(dates_invited)))
    plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
    plt.close()
def analyze_email_confirmations():
    """Plot a histogram of confirmation dates for invited users who claimed
    their account, saved to FIG_PATH.

    Does nothing when no confirmed invitees exist.
    """
    confirmed = user_collection.find({
        'unclaimed_records': {'$ne': {}},
        'is_claimed': True,
    })
    dates_confirmed = [
        user['date_confirmed']
        for user in confirmed
    ]
    if not dates_confirmed:
        return
    # plot_dates draws on the current pyplot figure; the previous unused
    # ``fig =`` binding was dropped.
    plot_dates(dates_confirmed)
    # FIX: title format was "({}) total)" -- unbalanced parenthesis typo.
    plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
    plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
    plt.close()
def main():
    """Generate both the invite and the confirmation figures."""
    analyze_email_invites()
    analyze_email_confirmations()
# Allow running the analytics script directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
vuchetichbalint/useful_python_scripts | dockerize_ml_model_as_restapi/dockerize/flask/app/main.py | 1 | 3432 | # -*- coding: utf-8 -*-
import time
import sys
from functools import wraps
from flask import Flask, jsonify, request
from flask_restful import Api, Resource, abort
from sklearn.neighbors import KNeighborsClassifier
import pickle
import numpy as np
import pandas as pd
from minio import Minio
import joblib
from io import BytesIO
sys.path.append('/app/src')
#sys.path.insert(0, './src')
from modell import predicting
# Flask application and its REST wrapper.
app = Flask(__name__)
api = Api(app)
# Shared secret checked by the require_apikey decorator ('token' header).
# NOTE(review): API key and MinIO credentials are hard-coded in source --
# they should come from environment/secret configuration.
api_key = 'secret_key'
# Object store holding the pickled model and the CSV datasets.
minio_client = Minio(
    endpoint='minio:9000',
    access_key='bence',
    secret_key='tollasmadar',
    secure=False
)
def make_number_if_possible(s):
    """Convert *s* to int or float when it looks numeric, else return it.

    FIX: also recognizes an optional single leading minus sign; the previous
    version returned negative numbers unchanged as strings. Scientific
    notation ('1e5') and 'nan'/'inf' are deliberately still left as strings,
    matching the original behavior.
    """
    body = s[1:] if s.startswith('-') else s
    if body.replace('.', '', 1).isdigit():
        # A '.' selects float; int()/float() both accept the leading sign.
        if '.' in s:
            return float(s)
        return int(s)
    return s
def numberify_dict(d):
    """Coerce every value of *d* through make_number_if_possible, in place.

    Returns the same (mutated) dict for call-chaining convenience.
    """
    for key, value in d.items():
        d[key] = make_number_if_possible(value)
    return d
def require_apikey(view_function):
    """Decorator: reject requests whose 'token' header does not match
    ``api_key`` with HTTP 401; otherwise run the wrapped view."""
    @wraps(view_function)
    def decorated_function(*args, **kwargs):
        # Guard clause: missing or mismatching token aborts immediately.
        token = request.headers.get('token')
        if not token or token != api_key:
            abort(401)
        return view_function(*args, **kwargs)
    return decorated_function
class PredictorResource(Resource):
    """Flask-RESTful resource serving model predictions.

    The model, a header-only frame and the training data are fetched lazily
    from MinIO on first use and cached as *class* attributes, so they are
    shared by all requests.
    """
    model = None
    header_df = None
    training_data = None

    def predict(self, args):
        """Build a one-row frame from the request query args and predict.

        Returns the first prediction row serialized as JSON.
        """
        d = numberify_dict(args.to_dict())
        df = pd.DataFrame([d])
        # Align the request row against the training columns via the
        # (empty) header frame, then tag it for the predicting() pipeline.
        x = pd.concat([df, self.header_df])
        x['type'] = 'test'
        # Debug trace of the assembled frame's shape (same bytes as before).
        with open('/app/shape.txt', 'a') as the_file:
            the_file.write('1: ' + str(x.shape))
            the_file.write('\n')
        prediction = predicting(x, self.training_data, self.model).iloc[0, :].to_json()
        return prediction

    def apply_model(self, x):
        # NOTE(review): ``preprocess_data`` is not defined on this class or
        # in this module -- this path raises AttributeError if exercised.
        x = self.preprocess_data(x)
        preds = self.model.predict(x)
        return preds.tolist()

    @require_apikey
    def post(self):
        # NOTE(review): debugging stub left in place -- POST always returns
        # {"msg": "qwe"} and never reaches the code below (which would also
        # crash: self.build_model() does not exist). Behavior intentionally
        # unchanged until the intended POST flow is decided.
        return jsonify(msg='qwe')
        if self.model is None:
            print('elso futas ///////////////////////////////////////////')
            self.build_model()
        if self.header_df is None:
            data = minio_client.get_object('data', 'data_header.df')
            self.header_df = pickle.load(BytesIO(data.data))
        message = request.json
        preds = self.apply_model(message['input_features'])
        print(preds)
        return jsonify(predictions=preds)

    # @require_apikey
    def get(self):
        """Lazily load artifacts from MinIO, then predict from query args."""
        # FIX: all of these checks used ``== None``. After the first request
        # header_df/training_data hold DataFrames, and ``df == None`` yields
        # an elementwise frame whose truth value raises ValueError, breaking
        # every request after the first. ``is None`` is the correct test.
        if self.model is None:
            data = minio_client.get_object('models', 'mymodel.pkl')
            self.model = pickle.load(BytesIO(data.data))
        if self.header_df is None:
            data = minio_client.get_object('data', 'sample_data.csv')
            df = pd.read_csv(BytesIO(data.data))
            # Keep only the column structure (zero rows) for alignment.
            self.header_df = df.iloc[0:0]
        if self.training_data is None:
            data = minio_client.get_object('data', 'data_eladasok_2014_2019.csv')
            df = pd.read_csv(BytesIO(data.data))
            df['type'] = 'train'
            self.training_data = df
        prediction = self.predict(request.args)
        return jsonify(prediction=prediction)
api.add_resource(PredictorResource, '/predict')
if __name__ == '__main__':
app.run(debug=True, port=80, host='0.0.0.0') | gpl-3.0 |
kyleabeauchamp/DBayes | dbayes/test_dipoles.py | 1 | 1044 | import pymc
import pymbar
import dipoles
import numpy as np
import pandas as pd
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
import mdtraj as md
# Uniform priors over the dipole force-field parameters.
q0 = pymc.Uniform("q0", 0.0, 1.)
sigma0 = pymc.Uniform("sigma0", 0.08, 0.5)
epsilon0 = pymc.Uniform("epsilon0", 0.1, 2.0)
# FIX: these two stochastics were constructed with the names "sigma0" and
# "epsilon0", duplicating the variables above inside the same pymc.Model.
sigma1 = pymc.Uniform("sigma1", 0.08, 0.5)
epsilon1 = pymc.Uniform("epsilon1", 0.1, 2.0)
r0 = pymc.Uniform("r0", 0.05, 0.5)
model = pymc.Model([q0, sigma0, epsilon0, sigma1, epsilon1, r0])
model.draw_from_prior()
# Simulate the liquid density of a 1000-molecule dipole box at ambient-ish
# thermodynamic conditions.
dipole = dipoles.Dipole(1000)
temperature = 300 * u.kelvin
pressure = 1.0 * u.atmospheres
values, mu, sigma = dipoles.simulate_density(dipole, temperature, pressure, print_frequency=25)
# Draw 10 parameter sets from the prior and record the simulated density
# (mean and error) for each.
data = []
for k in range(10):
    model.draw_from_prior()
    values, mu, sigma = dipoles.simulate_density(dipole, temperature, pressure, print_frequency=25)
    data.append(dict(q0=q0.value, sigma0=sigma0.value, epsilon0=epsilon0.value, sigma1=sigma1.value, epsilon1=epsilon1.value, r0=r0.value, density=mu, density_error=sigma))
handroissuazo/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 12 | 5049 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
    """Return a copy of mapping *a* with ndarray values turned into lists.

    Non-array values pass through unchanged.
    """
    converted = {}
    for key, val in a.items():
        if isinstance(val, np.ndarray):
            val = val.tolist()
        converted[key] = val
    return converted
class _FeedingFunctionsTestCase(test.TestCase):
  """Tests for feeding functions.

  Covers _ArrayFeedFn and _PandasFeedFn for batch sizes smaller than, equal
  to a divisor of, and larger than the data length, including wrap-around.
  """
  def testArrayFeedFnBatchOne(self):
    # Batch size 1 over 16 rows: each call yields the next row, wrapping.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)
    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {
          "index_placeholder": [i],
          "value_placeholder": [[2 * i, 2 * i + 1]]
      }
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchFive(self):
    # Batch size 5 over 16 rows: after 51 calls the next batch straddles
    # the wrap-around point (row 15 then rows 0-3).
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [15, 0, 1, 2, 3],
        "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchOneHundred(self):
    # Batch larger than the array: one batch cycles the data 6 times plus 4.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {
        "index_placeholder":
            list(range(0, 16)) * 6 + list(range(0, 4)),
        "value_placeholder":
            np.arange(32).reshape([16, 2]).tolist() * 6 +
            [[0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOne(self):
    # Same batch-one contract for DataFrames; skipped without pandas.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)
    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {
          "index_placeholder": [i + 96],
          "a_placeholder": [32 + i],
          "b_placeholder": [64 + i]
      }
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchFive(self):
    # Batch size 5 over 32 rows: wrap-around batch after 51 calls.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [127, 96, 97, 98, 99],
        "a_placeholder": [63, 32, 33, 34, 35],
        "b_placeholder": [95, 64, 65, 66, 67]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOneHundred(self):
    # Batch larger than the frame: cycles the 32 rows 3 times plus 4.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
ozak/geopandas | setup.py | 1 | 1878 | #!/usr/bin/env python
"""Installation script
"""
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
`pandas`_ objects.
The goal of GeoPandas is to make working with geospatial data in
python easier. It combines the capabilities of `pandas`_ and `shapely`_,
providing geospatial operations in pandas and a high-level interface
to multiple geometries to shapely. GeoPandas enables you to easily do
operations in python that would otherwise require a spatial database
such as PostGIS.
.. _pandas: http://pandas.pydata.org
.. _shapely: http://toblerity.github.io/shapely
"""
if os.environ.get('READTHEDOCS', False) == 'True':
INSTALL_REQUIRES = []
else:
INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'pyproj']
# get all data dirs in the datasets module
data_files = []
for item in os.listdir("geopandas/datasets"):
if not item.startswith('__'):
if os.path.isdir(os.path.join("geopandas/datasets/", item)):
data_files.append(os.path.join("datasets", item, '*'))
elif item.endswith('.zip'):
data_files.append(os.path.join("datasets", item))
data_files.append('tests/data/*')
setup(name='geopandas',
version=versioneer.get_version(),
description='Geographic pandas extensions',
license='BSD',
author='GeoPandas contributors',
author_email='kjordahl@alum.mit.edu',
url='http://geopandas.org',
long_description=LONG_DESCRIPTION,
packages=['geopandas', 'geopandas.io', 'geopandas.tools',
'geopandas.datasets',
'geopandas.tests', 'geopandas.tools.tests'],
package_data={'geopandas': data_files},
install_requires=INSTALL_REQUIRES,
cmdclass=versioneer.get_cmdclass())
| bsd-3-clause |
glennq/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier

# we create two unbalanced clusters: 1000 points around the origin and
# 100 points shifted to (2, 2)
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
          0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)

# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)

# Hyperplane w.x + b = 0 rewritten as the line y = a*x - b/w[1].
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]

# get the separating hyperplane using weighted classes
# (class 1 errors weighted 10x to compensate for its smaller size)
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)

ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]

# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()

plt.axis('tight')
plt.show()
| bsd-3-clause |
rhjvedder/Plague_models | tools/TempLoader.py | 1 | 6867 | """This program is designed to read temperature data from a csv file"""
import pandas as pd
from datetime import date
import netCDF4 as nc
from os.path import dirname, abspath
import os.path
class TempLoader:
    """Load a daily temperature/humidity series from a NOAA-style CSV file.

    A per-day template is first seeded from a monthly climatology (adjusted
    by a per-year warming anomaly); observed station values from the CSV then
    overwrite it, with missing days filled by linear interpolation.  A known
    observation gap (late 1991 / early 1992) is patched from gridded
    tmax/tmin NetCDF files (see `fill_holes`).
    """

    def __init__(self, start=1991, end=1999, update=False, fname="1107903"):
        """Build the daily template series covering [start, end].

        start, end -- first and last year (inclusive) of the series.
        update     -- when truthy, `read_raw` also writes the merged series
                      to data/temp_data.csv via `update_data`.
        fname      -- basename (without ".csv") of the raw station file,
                      expected next to the project root.
        """
        self.parent_dir = dirname(dirname(abspath(__file__)))
        self.loc = os.path.join(self.parent_dir, fname + ".csv")
        self.start = start
        self.end = end
        self.update = bool(update)
        # One pandas Timestamp per calendar day in the requested span.
        self.years_range = pd.date_range(date(self.start, 1, 1), date(self.end, 12, 31)).tolist()
        # -- Params
        # Monthly climatology: [mean temperature (deg C), relative humidity (%)].
        self.temp = [[27.1, 82], [27.2, 83], [27.5, 81], [27.4, 73], [26.1, 67], [24.6, 64], [24.2, 62], [24.6, 60],
                     [25.4, 63], [26.8, 66], [27.7, 72], [27.4, 80]]
        # Warming anomaly (deg C) added to the monthly mean, keyed by year.
        self.warming = {"1980": 0.150, "1981": 0.145, "1982": 0.19, "1983": 0.19, "1984": 0.165, "1985": 0.140,
                        "1986": 0.175, "1987": 0.2, "1988": 0.24, "1989": 0.275, "1990": 0.31, "1991": 0.30,
                        "1992": 0.28, "1993": 0.285, "1994": 0.29, "1995": 0.285, "1996": 0.318, "1997": 0.351,
                        "1998": 0.384, "1999": 0.417, "2000": 0.45, "2001": 0.483, "2002": 0.516, "2003": 0.549,
                        "2004": 0.582, "2005": 0.590, "2006": 0.582, "2007": 0.575, "2008": 0.590, "2009": 0.582,
                        "2010": 0.575}
        # "YYYY-MM-DD" -> [temperature, humidity]; starts as climatology and
        # is later overwritten by observed / interpolated values.
        self.template_temps = {}
        self.temp_missing = []
        self.temp_list = []
        for time in self.years_range:
            time = time.strftime("%Y-%m-%d")
            time_s = time.split("-")
            year = time_s[0]
            # Warming-adjusted monthly mean temperature ...
            self.template_temps[time] = list(map(lambda x: x + self.warming[year], self.temp[int(time_s[1]) - 1][:-1]))
            # ... followed by the (unadjusted) monthly humidity.
            self.template_temps[time].append(self.temp[int(time_s[1]) - 1][1])

    def read_raw(self):
        """Read the raw station CSV and merge observations into the template.

        Missing days between two observations are linearly interpolated.
        Returns the (template_temps, temp_list) pair.
        """
        with open(self.loc, 'r') as file:
            count = 0
            prev_day = 0
            prev_temp = 0
            for line in file.readlines():
                if count > 0:  # skip the CSV header row
                    line = line.replace("\n", '')
                    line = line[1:-1]  # strip the outer quotes
                    data = line.split('\",\"')
                    year, month, day = data[2].split("-")
                    year = int(year)
                    month = int(month)
                    day = int(day)
                    temp = float(data[3])
                    if self.start <= year <= self.end:
                        # NOTE(review): the gap test compares day-of-month
                        # numbers only, so a gap that spans a month boundary
                        # is not interpolated -- confirm this is intended.
                        if day != prev_day and day - prev_day > 1:
                            span = day - prev_day - 1
                            if span == 1:
                                # Single missing day: average the neighbours.
                                s_temp = (temp + prev_temp) / 2
                                self.template_temps["{}-{}-{}".format(year, TempLoader.str_int(month),
                                                                      TempLoader.str_int(prev_day + 1))
                                                    ][0] = round(s_temp, 2)
                            else:
                                if temp != prev_temp:
                                    # Linear interpolation across the gap.
                                    s_temp = (temp - prev_temp) / span
                                    for i in range(1, span):
                                        i_temp = prev_temp + (i * s_temp)
                                        date = "{}-{}-{}".format(year, TempLoader.str_int(month),
                                                                 TempLoader.str_int(prev_day + i))
                                        self.template_temps[date][0] = round(i_temp, 2)
                                else:
                                    # Flat gap: both endpoints agree, so repeat
                                    # the value.  BUG FIX: this loop previously
                                    # started at `i` (a stale/undefined index)
                                    # instead of 1, which raised a NameError on
                                    # the first flat gap or skipped days.
                                    for i in range(1, span):
                                        date = "{}-{}-{}".format(year, TempLoader.str_int(month),
                                                                 TempLoader.str_int(prev_day + i))
                                        self.template_temps[date][0] = temp
                        self.template_temps[data[2]][0] = temp
                        prev_temp = temp
                        prev_day = day
                count += 1
        # Patch the known observation gap from the gridded NetCDF data.
        self.fill_holes()
        if self.update:
            self.update_data()
        else:
            for n, i in enumerate(self.years_range):
                # Days 273..396 of the series (1991-10-01 .. 1992-02-01) come
                # from the gridded data prepared by fill_holes.
                if 273 <= n <= 396:
                    self.template_temps[i.strftime("%Y-%m-%d")][0] = self.temp_missing[n - 273]
                self.temp_list.append(self.template_temps[i.strftime("%Y-%m-%d")][0])
        return self.template_temps, self.temp_list

    def update_data(self):
        """Write the merged series to data/temp_data.csv (one day per line)."""
        with open(os.path.join(self.parent_dir, "data", "temp_data.csv"), 'w') as file:
            for n, i in enumerate(self.years_range):
                # The gridded patch always targets calendar days in 1991/92,
                # so shift the window when the series starts earlier.
                offset = (1991 - self.start) * 365
                if offset + 273 <= n <= offset + 396:
                    self.template_temps[i.strftime("%Y-%m-%d")][0] = self.temp_missing[n - (offset + 273)]
                file.write(i.strftime("%Y-%m-%d") + ":{},{}".format(self.template_temps[i.strftime("%Y-%m-%d")][0],
                                                                    self.template_temps[i.strftime("%Y-%m-%d")][1]) +
                           "\n")
                self.temp_list.append(self.template_temps[i.strftime("%Y-%m-%d")][0])

    def fill_holes(self):
        """Fill self.temp_missing from gridded tmax/tmin NetCDF files.

        For each day from 1991-10-01 through 1992-02-01, the daily mean of
        (tmax + tmin) / 2 is averaged over a 3x3 grid-cell neighbourhood.
        """
        tmax_1991_data = nc.Dataset(os.path.join(self.parent_dir, 'data', 'tmax.1991.nc'), 'r')
        tmin_1991_data = nc.Dataset(os.path.join(self.parent_dir, 'data', 'tmin.1991.nc'), 'r')
        tmax_1992_data = nc.Dataset(os.path.join(self.parent_dir, 'data', 'tmax.1992.nc'), 'r')
        tmin_1992_data = nc.Dataset(os.path.join(self.parent_dir, 'data', 'tmin.1992.nc'), 'r')
        tmaxs_1991 = tmax_1991_data.variables['tmax'][273:]
        tmins_1991 = tmin_1991_data.variables['tmin'][273:]
        tmaxs_1992 = tmax_1992_data.variables['tmax'][:32]
        tmins_1992 = tmin_1992_data.variables['tmin'][:32]
        for i in range(len(tmaxs_1991)):
            combo = []
            # 3x3 neighbourhood of grid cells around the study site
            # (lat indices 210-212, lon indices 92-94).
            for la in [210, 211, 212]:
                for lo in [92, 93, 94]:
                    combo.append((tmaxs_1991[i][la][lo] + tmins_1991[i][la][lo]) / 2)
            self.temp_missing.append(round(sum(combo) / float(len(combo)), 2))
        tmax_1991_data.close()
        tmin_1991_data.close()
        for i in range(len(tmaxs_1992)):
            combo = []
            for la in [210, 211, 212]:
                for lo in [92, 93, 94]:
                    combo.append((tmaxs_1992[i][la][lo] + tmins_1992[i][la][lo]) / 2)
            self.temp_missing.append(round(sum(combo) / float(len(combo)), 2))
        tmax_1992_data.close()
        tmin_1992_data.close()

    @staticmethod
    def str_int(integer):
        """Return *integer* as a string zero-padded to two digits."""
        if integer > 9:
            return str(integer)
        else:
            return "0{}".format(integer)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/stackplot.py | 6 | 4198 | """
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
    """Draws a stacked area plot.

    *x* : 1d array of dimension N

    *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension
          1xN. The data is assumed to be unstacked. Each of the following
          calls is legal::

            stackplot(x, y)               # where y is MxN
            stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4, are all 1xNm

    Keyword arguments:

    *baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
                Method used to calculate the baseline. 'zero' is just a
                simple stacked plot. 'sym' is symmetric around zero and
                is sometimes called `ThemeRiver`.  'wiggle' minimizes the
                sum of the squared slopes. 'weighted_wiggle' does the
                same but weights to account for size of each layer.
                It is also called `Streamgraph`-layout. More details
                can be found at http://leebyron.com/streamgraph/.

    *labels* : A list or tuple of labels to assign to each data series.

    *colors* : A list or tuple of colors. These will be cycled through and
               used to colour the stacked areas.

    All other keyword arguments are passed to
    :func:`~matplotlib.Axes.fill_between`

    Returns *r* : A list of
    :class:`~matplotlib.collections.PolyCollection`, one for each
    element in the stacked area plot.
    """

    # Normalize the y inputs to a single MxN array.
    if len(args) == 1:
        y = np.atleast_2d(*args)
    elif len(args) > 1:
        y = np.row_stack(args)

    labels = iter(kwargs.pop('labels', []))

    colors = kwargs.pop('colors', None)
    if colors is not None:
        axes.set_prop_cycle(cycler('color', colors))

    baseline = kwargs.pop('baseline', 'zero')
    # Assume data passed has not been 'stacked', so stack it here.
    # stack[i] is the running sum of layers 0..i at every x.
    stack = np.cumsum(y, axis=0)

    if baseline == 'zero':
        # Plain stacked plot: baseline is the x axis.
        first_line = 0.

    elif baseline == 'sym':
        # 'ThemeRiver': center the total mass symmetrically around zero.
        first_line = -np.sum(y, 0) * 0.5
        stack += first_line[None, :]

    elif baseline == 'wiggle':
        # Minimize the sum of squared slopes (Byron & Wattenberg).
        m = y.shape[0]
        first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
        first_line /= -m
        stack += first_line

    elif baseline == 'weighted_wiggle':
        # 'Streamgraph': as 'wiggle', but layers are weighted by thickness.
        m, n = y.shape
        center = np.zeros(n)
        total = np.sum(y, 0)
        # multiply by 1/total (or zero) to avoid infinities in the division:
        inv_total = np.zeros_like(total)
        mask = total > 0
        inv_total[mask] = 1.0 / total[mask]
        # Per-layer change between consecutive x positions.
        increase = np.hstack((y[:, 0:1], np.diff(y)))
        below_size = total - stack
        below_size += 0.5 * y
        move_up = below_size * inv_total
        move_up[:, 0] = 0.5
        center = (move_up - 0.5) * increase
        center = np.cumsum(center.sum(0))
        first_line = center - 0.5 * total
        stack += first_line
    else:
        errstr = "Baseline method %s not recognised. " % baseline
        errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
        raise ValueError(errstr)

    # Color between x = 0 and the first array.
    color = axes._get_lines.get_next_color()
    coll = axes.fill_between(x, first_line, stack[0, :],
                             facecolor=color, label=six.next(labels, None),
                             **kwargs)
    # Keep autoscaling anchored at y == 0 for the bottom layer.
    coll.sticky_edges.y[:] = [0]
    r = [coll]

    # Color between array i-1 and array i
    for i in xrange(len(y) - 1):
        color = axes._get_lines.get_next_color()
        r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
                                   facecolor=color,
                                   label= six.next(labels, None),
                                   **kwargs))
    return r
| gpl-3.0 |
schets/scikit-learn | sklearn/datasets/svmlight_format.py | 39 | 15319 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, one sample per line, storing only
    the non-zero features; the first element of each line holds the target.
    Parsing is expensive, so consider wrapping this loader with
    joblib.Memory.cache when reading the same file repeatedly.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) a file to load.  Paths ending in ".gz" or ".bz2" are
        decompressed on the fly; integers are treated as file descriptors.
        File-likes / descriptors are not closed and must be in binary mode.

    n_features : int or None
        Number of features to use; inferred from the data when None.
        Useful when loading slices of a bigger dataset that may not all
        contain every feature.

    multilabel : boolean, optional, default False
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based.  "auto"
        applies a heuristic and is always safe.

    query_id : boolean, default False
        If True, also return the query_id array (pairwise "qid" values).

    dtype : numpy data type, default np.float64
        Data type of the returned arrays ``X`` and ``y``.

    Returns
    -------
    X: scipy.sparse matrix of shape (n_samples, n_features)

    y: ndarray of shape (n_samples,), or, in the multilabel case, a list
       of tuples of length n_samples.

    query_id: array of shape (n_samples,); only when query_id is True.

    See also
    --------
    load_svmlight_files: same format, several files with a shared
    feature space.
    """
    # Single-file convenience wrapper: delegate to the multi-file loader
    # and flatten its [X, y(, query_id)] result list into a tuple.
    loaded = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id)
    return tuple(loaded)
def _gen_open(f):
    """Open *f* for binary reading.

    Accepts an integer file descriptor (wrapped without taking ownership)
    or a path; paths ending in ".gz" / ".bz2" are decompressed on the fly.
    Anything else raises TypeError.
    """
    # File descriptor: wrap it, but leave closing to the caller.
    if isinstance(f, int):
        return io.open(f, "rb", closefd=False)
    if not isinstance(f, six.string_types):
        raise TypeError("expected {str, int, file-like}, got %s" % type(f))

    ext = os.path.splitext(f)[1]
    if ext == ".gz":
        import gzip
        return gzip.open(f, "rb")
    if ext == ".bz2":
        from bz2 import BZ2File
        return BZ2File(f, "rb")
    return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse one svmlight source *f* with the Cython parser.

    Returns the raw (data, indices, indptr, labels, query) arrays; opening
    and closing of path/descriptor sources is handled here, while existing
    file-like objects are used as-is.
    """
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = \
            _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
    # XXX remove closing when Python 2.7+/3.1+ required
    else:
        with closing(_gen_open(f)) as f:
            actual_dtype, data, ind, indptr, labels, query = \
                _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = frombuffer_empty(labels, np.float64)
    data = frombuffer_empty(data, actual_dtype)
    indices = frombuffer_empty(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)   # never empty
    query = frombuffer_empty(query, np.intc)

    data = np.asarray(data, dtype=dtype)    # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load several svmlight-format files sharing one feature space.

    Equivalent to mapping load_svmlight_file over *files*, except that the
    results are concatenated into a single flat list and every sample
    matrix is forced to the same number of columns — essential when e.g. a
    test set must line up with the training set.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load; ".gz"/".bz2" paths are decompressed on
        the fly, integers are file descriptors.  File-likes must be opened
        in binary mode and are not closed here.

    n_features: int or None
        Number of columns to use.  When None it is inferred from the
        maximum column index across all files; a smaller explicit value
        raises ValueError.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices are zero-based (True) or one-based (False);
        one-based indices are shifted down.  "auto" uses a heuristic and
        is always safe.

    query_id: boolean, defaults to False
        If True, also return the query_id array for each file.

    dtype : numpy data type, default np.float64
        Data type of the returned arrays ``X`` and ``y``.

    Returns
    -------
    [X1, y1, ..., Xn, yn] — or [X1, y1, q1, ..., Xn, yn, qn] when
    query_id is True.

    See also
    --------
    load_svmlight_file
    """
    parsed = [_open_and_load(f, dtype, multilabel, bool(zero_based),
                             bool(query_id))
              for f in files]

    # Shift one-based column indices to zero-based.  Triggered either
    # explicitly, or in "auto" mode when no file ever uses column 0
    # (the heuristic signature of one-based data).
    one_based = (zero_based is False or
                 zero_based == "auto" and all(np.min(p[1]) > 0
                                              for p in parsed))
    if one_based:
        for p in parsed:
            cols = p[1]
            cols -= 1  # in-place update of the shared index array

    # Widest column index observed across all files.
    n_f = max(p[1].max() for p in parsed) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))

    result = []
    for data, indices, indptr, y, query_values in parsed:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
def _dump_svmlight(X, y, f, one_based, comment, query_id):
    """Write (X, y) to the binary file object *f* in svmlight format.

    Helper for dump_svmlight_file: X may be dense or sparse, *one_based*
    selects the column index origin, *comment* (optional bytes) is emitted
    as a header, and *query_id* (optional array) adds "qid:" fields.
    """
    is_sp = int(hasattr(X, "tocsr"))
    # Integer data prints as "%d", everything else with full float precision.
    if X.dtype.kind == 'i':
        value_pattern = u("%d:%d")
    else:
        value_pattern = u("%d:%.16g")

    if y.dtype.kind == 'i':
        line_pattern = u("%d")
    else:
        line_pattern = u("%.16g")

    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")

    if comment:
        # Header comments identify the producer and the index origin.
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))

        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())

    for i in range(X.shape[0]):
        # Collect the (column, value) pairs of row i, skipping zeros.
        if is_sp:
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])
        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)

        if query_id is not None:
            feat = (y[i], query_id[i], s)
        else:
            feat = (y[i], s)
        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")     # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    y = np.asarray(y)
    if y.ndim != 1:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if Xval is X and hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        # qid values must align one-to-one with the samples.
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    # Accept an already-open binary file object, or open the given path.
    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, one_based, comment, query_id)
| bsd-3-clause |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
    """
    Process a matlab(TM) style color/line style format string.  Return a
    (*linestyle*, *marker*, *color*) tuple as a result of the processing.
    Default values are ('-', 'b').  Example format strings include:

    * 'ko': black circles
    * '.b': blue dots
    * 'r--': red dashed lines

    .. seealso::

        :func:`~matplotlib.Line2D.lineStyles` and
        :func:`~matplotlib.pyplot.colors`:
            for all possible styles and color format string.
    """

    linestyle = None
    marker = None
    color = None

    # Is fmt just a colorspec?
    try:
        color = mcolors.colorConverter.to_rgb(fmt)
        return linestyle, marker, color     # Yes.
    except ValueError:
        pass                                # No, not just a color.

    # handle the multi char special cases and strip them from the
    # string; this must happen before the per-character scan below,
    # otherwise '--' would be misread as two '-' linestyle symbols.
    if fmt.find('--')>=0:
        linestyle = '--'
        fmt = fmt.replace('--', '')
    if fmt.find('-.')>=0:
        linestyle = '-.'
        fmt = fmt.replace('-.', '')
    if fmt.find(' ')>=0:
        linestyle = 'None'
        fmt = fmt.replace(' ', '')

    chars = [c for c in fmt]

    # Classify each remaining character as linestyle, marker or color;
    # duplicates of any category are an error.
    for c in chars:
        if c in mlines.lineStyles:
            if linestyle is not None:
                raise ValueError(
                    'Illegal format string "%s"; two linestyle symbols' % fmt)
            linestyle = c
        elif c in mlines.lineMarkers:
            if marker is not None:
                raise ValueError(
                    'Illegal format string "%s"; two marker symbols' % fmt)
            marker = c
        elif c in mcolors.colorConverter.colors:
            if color is not None:
                raise ValueError(
                    'Illegal format string "%s"; two color symbols' % fmt)
            color = c
        else:
            raise ValueError(
                'Unrecognized character %c in format string' % c)

    # Fall back to the rc default linestyle / no marker.
    if linestyle is None and marker is None:
        linestyle = rcParams['lines.linestyle']
    if linestyle is None:
        linestyle = 'None'
    if marker is None:
        marker = 'None'

    return linestyle, marker, color
def set_default_color_cycle(clist):
    """
    Install *clist* (a sequence of mpl color specifiers) as the default
    cycle of colors used by the plot command.

    This must be called before creating the :class:`Axes` it should
    affect; it applies to all axes created afterwards.
    """
    # Store a copy so later mutation of the caller's sequence cannot
    # change the cycle behind our back.
    _process_plot_var_args.defaultColors = list(clist)
    # The first entry also becomes the rc default line color.
    rcParams['lines.color'] = clist[0]
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
    def __init__(self, axes, command='plot'):
        # axes: the Axes instance the generated artists will belong to.
        # command: 'plot' or 'fill'; selects how arguments are interpreted.
        self.axes = axes
        self.command = command
        self._clear_color_cycle()
    def _clear_color_cycle(self):
        """Reset the color cycle to the class default and restart counting."""
        self.colors = _process_plot_var_args.defaultColors[:]
        # if the default line color is a color format string, move it up
        # in the queue so it is handed out first
        try: ind = self.colors.index(rcParams['lines.color'])
        except ValueError:
            self.firstColor = rcParams['lines.color']
        else:
            self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
            self.firstColor = self.colors[0]

        self.Ncolors = len(self.colors)

        self.count = 0
    def set_color_cycle(self, clist):
        """Replace the color cycle with *clist* and restart counting."""
        self.colors = clist[:]
        self.firstColor = self.colors[0]
        self.Ncolors = len(self.colors)
        self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
    def __call__(self, *args, **kwargs):
        # Propagate any xunits/yunits kwargs to the axis objects before
        # the positional arguments are parsed into artists.
        if self.axes.xaxis is not None and self.axes.yaxis is not None:
            xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
            yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
            if xunits!=self.axes.xaxis.units:
                self.axes.xaxis.set_units(xunits)
            if yunits!=self.axes.yaxis.units:
                self.axes.yaxis.set_units(yunits)

        ret =  self._grab_next_args(*args, **kwargs)
        return ret
    def set_lineprops(self, line, **kwargs):
        """Apply **kwargs to *line* via its set_<name> setter methods."""
        assert self.command == 'plot', 'set_lineprops only works with "plot"'
        for key, val in kwargs.items():
            funcName = "set_%s"%key
            if not hasattr(line,funcName):
                raise TypeError, 'There is no line property "%s"'%key
            func = getattr(line,funcName)
            func(val)
    def set_patchprops(self, fill_poly, **kwargs):
        """Apply **kwargs to *fill_poly* via its set_<name> setter methods."""
        assert self.command == 'fill', 'set_patchprops only works with "fill"'
        for key, val in kwargs.items():
            funcName = "set_%s"%key
            if not hasattr(fill_poly,funcName):
                raise TypeError, 'There is no patch property "%s"'%key
            func = getattr(fill_poly,funcName)
            func(val)
    def _xy_from_y(self, y):
        """Synthesize an index-based x for *y*; return (x, y, multicol)."""
        if self.axes.yaxis is not None:
            b = self.axes.yaxis.update_units(y)
            # Unit-converted data: leave it alone and flag multicol False.
            if b: return np.arange(len(y)), y, False

        if not ma.isMaskedArray(y):
            y = np.asarray(y)
        # Promote 1d input to a single-column 2d array.
        if len(y.shape) == 1:
            y = y[:,np.newaxis]
        nr, nc = y.shape
        x = np.arange(nr)
        if len(x.shape) == 1:
            x = x[:,np.newaxis]
        return x,y, True
    def _xy_from_xy(self, x, y):
        # Normalize an (x, y) pair to 2-D column arrays of matching shape;
        # a single column on one side is broadcast against the other.
        # Returns (x, y, multicol).
        if self.axes.xaxis is not None and self.axes.yaxis is not None:
            bx = self.axes.xaxis.update_units(x)
            by = self.axes.yaxis.update_units(y)
            # right now multicol is not supported if either x or y are
            # unit enabled but this can be fixed..
            if bx or by: return x, y, False
        x = ma.asarray(x)
        y = ma.asarray(y)
        if len(x.shape) == 1:
            x = x[:,np.newaxis]
        if len(y.shape) == 1:
            y = y[:,np.newaxis]
        nrx, ncx = x.shape
        nry, ncy = y.shape
        assert nrx == nry, 'Dimensions of x and y are incompatible'
        if ncx == ncy:
            return x, y, True
        if ncx == 1:
            # Broadcast the single x column across all y columns.
            x = np.repeat(x, ncy, axis=1)
        if ncy == 1:
            y = np.repeat(y, ncx, axis=1)
        assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
        return x, y, True
    def _plot_1_arg(self, y, **kwargs):
        # Handle plot(y): build Line2D artists against an implicit x
        # range, one line per column when y is 2-D.
        assert self.command == 'plot', 'fill needs at least 2 arguments'
        ret = []
        x, y, multicol = self._xy_from_y(y)
        if multicol:
            for j in xrange(y.shape[1]):
                color = self._get_next_cycle_color()
                seg = mlines.Line2D(x, y[:,j],
                             color = color,
                             axes=self.axes,
                             )
                self.set_lineprops(seg, **kwargs)
                ret.append(seg)
        else:
            color = self._get_next_cycle_color()
            seg = mlines.Line2D(x, y,
                         color = color,
                         axes=self.axes,
                         )
            self.set_lineprops(seg, **kwargs)
            ret.append(seg)
        return ret
    def _plot_2_args(self, tup2, **kwargs):
        # Handle a two-argument group: either (y, fmt) -- a data array
        # plus a format string -- or (x, y).  Builds Line2D artists for
        # 'plot' or Polygon patches for 'fill'.
        ret = []
        if is_string_like(tup2[1]):
            # (y, fmt): the format string fixes linestyle/marker/color.
            assert self.command == 'plot', ('fill needs at least 2 non-string '
                                            'arguments')
            y, fmt = tup2
            x, y, multicol = self._xy_from_y(y)
            linestyle, marker, color = _process_plot_format(fmt)
            def makeline(x, y):
                _color = color
                if _color is None:
                    # Format string gave no color: take the next cycle color.
                    _color = self._get_next_cycle_color()
                seg = mlines.Line2D(x, y,
                             color=_color,
                             linestyle=linestyle, marker=marker,
                             axes=self.axes,
                             )
                self.set_lineprops(seg, **kwargs)
                ret.append(seg)
            if multicol:
                for j in xrange(y.shape[1]):
                    makeline(x[:,j], y[:,j])
            else:
                makeline(x, y)
            return ret
        else:
            # (x, y): dispatch on the command to line or filled polygon.
            x, y = tup2
            x, y, multicol = self._xy_from_xy(x, y)
            def makeline(x, y):
                color = self._get_next_cycle_color()
                seg = mlines.Line2D(x, y,
                             color=color,
                             axes=self.axes,
                             )
                self.set_lineprops(seg, **kwargs)
                ret.append(seg)
            def makefill(x, y):
                # Unit conversion happens here because Polygon (unlike
                # Line2D) stores plain vertex arrays.
                x = self.axes.convert_xunits(x)
                y = self.axes.convert_yunits(y)
                facecolor = self._get_next_cycle_color()
                seg = mpatches.Polygon(np.hstack(
                                    (x[:,np.newaxis],y[:,np.newaxis])),
                      facecolor = facecolor,
                      fill=True,
                      closed=closed
                      )
                self.set_patchprops(seg, **kwargs)
                ret.append(seg)
            if self.command == 'plot':
                func = makeline
            else:
                # 'closed' is closed over by makefill above.
                closed = kwargs.get('closed', True)
                func = makefill
            if multicol:
                for j in xrange(y.shape[1]):
                    func(x[:,j], y[:,j])
            else:
                func(x, y)
            return ret
    def _plot_3_args(self, tup3, **kwargs):
        # Handle an (x, y, fmt) group: data plus a format string, for
        # either the 'plot' or 'fill' command.
        ret = []
        x, y, fmt = tup3
        x, y, multicol = self._xy_from_xy(x, y)
        linestyle, marker, color = _process_plot_format(fmt)
        def makeline(x, y):
            _color = color
            if _color is None:
                # No color in the format string: use the next cycle color.
                _color = self._get_next_cycle_color()
            seg = mlines.Line2D(x, y,
                         color=_color,
                         linestyle=linestyle, marker=marker,
                         axes=self.axes,
                         )
            self.set_lineprops(seg, **kwargs)
            ret.append(seg)
        def makefill(x, y):
            # For fill the format-string color (possibly None) is used
            # directly as the facecolor.
            facecolor = color
            x = self.axes.convert_xunits(x)
            y = self.axes.convert_yunits(y)
            seg = mpatches.Polygon(np.hstack(
                                (x[:,np.newaxis],y[:,np.newaxis])),
                  facecolor = facecolor,
                  fill=True,
                  closed=closed
                  )
            self.set_patchprops(seg, **kwargs)
            ret.append(seg)
        if self.command == 'plot':
            func = makeline
        else:
            # 'closed' is closed over by makefill above.
            closed = kwargs.get('closed', True)
            func = makefill
        if multicol:
            for j in xrange(y.shape[1]):
                func(x[:,j], y[:,j])
        else:
            func(x, y)
        return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
    """
    The :class:`Axes` contains most of the figure elements:
    :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
    :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
    :class:`~matplotlib.patches.Polygon`, etc., and sets the
    coordinate system.

    The :class:`Axes` instance supports callbacks through a callbacks
    attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
    instance. The events you can connect to are 'xlim_changed' and
    'ylim_changed' and the callback will be called with func(*ax*)
    where *ax* is the :class:`Axes` instance.
    """
    # Projection name used to register/look up this axes class.
    name = "rectilinear"
    # Class-level groupers tracking which Axes instances share an
    # x- or y-axis (populated via the sharex/sharey constructor args).
    _shared_x_axes = cbook.Grouper()
    _shared_y_axes = cbook.Grouper()
    def __str__(self):
        # Terse description showing the position rectangle in figure coords.
        return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
    def __init__(self, fig, rect,
                 axisbg = None, # defaults to rc axes.facecolor
                 frameon = True,
                 sharex=None, # use Axes instance's xaxis info
                 sharey=None, # use Axes instance's yaxis info
                 label='',
                 **kwargs
                 ):
        """
        Build an :class:`Axes` instance in
        :class:`~matplotlib.figure.Figure` *fig* with
        *rect=[left, bottom, width, height]* in
        :class:`~matplotlib.figure.Figure` coordinates
        Optional keyword arguments:
          ================ =========================================
          Keyword          Description
          ================ =========================================
          *adjustable*     [ 'box' | 'datalim' ]
          *alpha*          float: the alpha transparency
          *anchor*         [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
                           'NW', 'W' ]
          *aspect*         [ 'auto' | 'equal' | aspect_ratio ]
          *autoscale_on*   [ *True* | *False* ] whether or not to
                           autoscale the *viewlim*
          *axis_bgcolor*   any matplotlib color, see
                           :func:`~matplotlib.pyplot.colors`
          *axisbelow*      draw the grids and ticks below the other
                           artists
          *cursor_props*   a (*float*, *color*) tuple
          *figure*         a :class:`~matplotlib.figure.Figure`
                           instance
          *frame_on*       a boolean - draw the axes frame
          *label*          the axes label
          *navigate*       [ *True* | *False* ]
          *navigate_mode*  [ 'PAN' | 'ZOOM' | None ] the navigation
                           toolbar button status
          *position*       [left, bottom, width, height] in
                           class:`~matplotlib.figure.Figure` coords
          *sharex*         an class:`~matplotlib.axes.Axes` instance
                           to share the x-axis with
          *sharey*         an class:`~matplotlib.axes.Axes` instance
                           to share the y-axis with
          *title*          the title string
          *visible*        [ *True* | *False* ] whether the axes is
                           visible
          *xlabel*         the xlabel
          *xlim*           (*xmin*, *xmax*) view limits
          *xscale*         [%(scale)s]
          *xticklabels*    sequence of strings
          *xticks*         sequence of floats
          *ylabel*         the ylabel strings
          *ylim*           (*ymin*, *ymax*) view limits
          *yscale*         [%(scale)s]
          *yticklabels*    sequence of strings
          *yticks*         sequence of floats
          ================ =========================================
        """ % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
        # NOTE(review): because the triple-quoted string above is
        # %-formatted, it is an expression statement rather than a real
        # docstring (__init__.__doc__ ends up None).  Left untouched to
        # avoid any behavior change.
        martist.Artist.__init__(self)
        if isinstance(rect, mtransforms.Bbox):
            self._position = rect
        else:
            self._position = mtransforms.Bbox.from_bounds(*rect)
        # Remember the caller-supplied position; apply_aspect() mutates
        # the active copy, reset_position() restores this one.
        self._originalPosition = self._position.frozen()
        self.set_axes(self)
        self.set_aspect('auto')
        self._adjustable = 'box'
        self.set_anchor('C')
        self._sharex = sharex
        self._sharey = sharey
        if sharex is not None:
            self._shared_x_axes.join(self, sharex)
            # Shared axes cannot use 'box' adjustment; silently switch.
            if sharex._adjustable == 'box':
                sharex._adjustable = 'datalim'
                #warnings.warn(
                #    'shared axes: "adjustable" is being changed to "datalim"')
            self._adjustable = 'datalim'
        if sharey is not None:
            self._shared_y_axes.join(self, sharey)
            if sharey._adjustable == 'box':
                sharey._adjustable = 'datalim'
                #warnings.warn(
                #    'shared axes: "adjustable" is being changed to "datalim"')
            self._adjustable = 'datalim'
        self.set_label(label)
        self.set_figure(fig)
        # this call may differ for non-sep axes, eg polar
        self._init_axis()
        if axisbg is None: axisbg = rcParams['axes.facecolor']
        self._axisbg = axisbg
        self._frameon = frameon
        self._axisbelow = rcParams['axes.axisbelow']
        self._hold = rcParams['axes.hold']
        self._connected = {} # a dict from events to (id, func)
        # cla() creates the axis artists and per-axes state.
        self.cla()
        # funcs used to format x and y - fall back on major formatters
        self.fmt_xdata = None
        self.fmt_ydata = None
        self.set_cursor_props((1,'k')) # set the cursor properties for axes
        self._cachedRenderer = None
        self.set_navigate(True)
        self.set_navigate_mode(None)
        # Apply any remaining keyword arguments as artist properties.
        if len(kwargs): martist.setp(self, **kwargs)
        if self.xaxis is not None:
            self._xcid = self.xaxis.callbacks.connect('units finalize',
                                                      self.relim)
        if self.yaxis is not None:
            self._ycid = self.yaxis.callbacks.connect('units finalize',
                                                      self.relim)
    def get_window_extent(self, *args, **kwargs):
        '''
        get the axes bounding box in display space; *args* and
        *kwargs* are empty
        '''
        return self.bbox
    def _init_axis(self):
        "move this out of __init__ because non-separable axes don't use it"
        # Create the x/y Axis artists and hook them into the transform
        # machinery.
        self.xaxis = maxis.XAxis(self)
        self.yaxis = maxis.YAxis(self)
        self._update_transScale()
    def set_figure(self, fig):
        """
        Set the class:`~matplotlib.axes.Axes` figure

        accepts a class:`~matplotlib.figure.Figure` instance
        """
        martist.Artist.set_figure(self, fig)
        # bbox tracks the axes rectangle in display space via the
        # figure transform.
        self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
        #these will be updated later as data is added
        self.dataLim = mtransforms.Bbox.unit()
        self.viewLim = mtransforms.Bbox.unit()
        self.transScale = mtransforms.TransformWrapper(
            mtransforms.IdentityTransform())
        self._set_lim_and_transforms()
    def _set_lim_and_transforms(self):
        """
        set the *dataLim* and *viewLim*
        :class:`~matplotlib.transforms.Bbox` attributes and the
        *transScale*, *transData*, *transLimits* and *transAxes*
        transformations.
        """
        # Axes (0..1) coordinates -> display coordinates.
        self.transAxes = mtransforms.BboxTransformTo(self.bbox)
        # Transforms the x and y axis separately by a scale factor
        # It is assumed that this part will have non-linear components
        self.transScale = mtransforms.TransformWrapper(
            mtransforms.IdentityTransform())
        # An affine transformation on the data, generally to limit the
        # range of the axes
        self.transLimits = mtransforms.BboxTransformFrom(
            mtransforms.TransformedBbox(self.viewLim, self.transScale))
        # The parentheses are important for efficiency here -- they
        # group the last two (which are usually affines) separately
        # from the first (which, with log-scaling can be non-affine).
        self.transData = self.transScale + (self.transLimits + self.transAxes)
        # Blended transforms: one direction in data coords, the other
        # in axes coords -- used for ticks, labels and gridlines.
        self._xaxis_transform = mtransforms.blended_transform_factory(
                self.axes.transData, self.axes.transAxes)
        self._yaxis_transform = mtransforms.blended_transform_factory(
                self.axes.transAxes, self.axes.transData)
    def get_xaxis_transform(self):
        """
        Get the transformation used for drawing x-axis labels, ticks
        and gridlines.  The x-direction is in data coordinates and the
        y-direction is in axis coordinates.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        return self._xaxis_transform
    def get_xaxis_text1_transform(self, pad_points):
        """
        Get the transformation used for drawing x-axis labels, which
        will add the given amount of padding (in points) between the
        axes and the label.  The x-direction is in data coordinates
        and the y-direction is in axis coordinates.  Returns a
        3-tuple of the form::

            (transform, valign, halign)

        where *valign* and *halign* are requested alignments for the
        text.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        # 72.0 converts points to inches for the dpi-scaled translation;
        # the negative offset pushes primary x-labels below the axes.
        return (self._xaxis_transform +
                mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
                                              self.figure.dpi_scale_trans),
                "top", "center")
    def get_xaxis_text2_transform(self, pad_points):
        """
        Get the transformation used for drawing the secondary x-axis
        labels, which will add the given amount of padding (in points)
        between the axes and the label.  The x-direction is in data
        coordinates and the y-direction is in axis coordinates.
        Returns a 3-tuple of the form::

            (transform, valign, halign)

        where *valign* and *halign* are requested alignments for the
        text.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        # Positive offset: secondary x-labels sit above the axes.
        return (self._xaxis_transform +
                mtransforms.ScaledTranslation(0, pad_points / 72.0,
                                              self.figure.dpi_scale_trans),
                "bottom", "center")
    def get_yaxis_transform(self):
        """
        Get the transformation used for drawing y-axis labels, ticks
        and gridlines.  The x-direction is in axis coordinates and the
        y-direction is in data coordinates.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        return self._yaxis_transform
    def get_yaxis_text1_transform(self, pad_points):
        """
        Get the transformation used for drawing y-axis labels, which
        will add the given amount of padding (in points) between the
        axes and the label.  The x-direction is in axis coordinates
        and the y-direction is in data coordinates.  Returns a 3-tuple
        of the form::

            (transform, valign, halign)

        where *valign* and *halign* are requested alignments for the
        text.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        # Negative x offset pushes primary y-labels left of the axes.
        return (self._yaxis_transform +
                mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
                                               self.figure.dpi_scale_trans),
                "center", "right")
    def get_yaxis_text2_transform(self, pad_points):
        """
        Get the transformation used for drawing the secondary y-axis
        labels, which will add the given amount of padding (in points)
        between the axes and the label.  The x-direction is in axis
        coordinates and the y-direction is in data coordinates.
        Returns a 3-tuple of the form::

            (transform, valign, halign)

        where *valign* and *halign* are requested alignments for the
        text.

        .. note::
            This transformation is primarily used by the
            :class:`~matplotlib.axis.Axis` class, and is meant to be
            overridden by new kinds of projections that may need to
            place axis elements in different locations.
        """
        # Positive x offset: secondary y-labels sit right of the axes.
        return (self._yaxis_transform +
                mtransforms.ScaledTranslation(pad_points / 72.0, 0,
                                               self.figure.dpi_scale_trans),
                "center", "left")
    def _update_transScale(self):
        # Rebuild the scale transform from the current axis scales
        # (e.g. after switching linear <-> log).
        self.transScale.set(
            mtransforms.blended_transform_factory(
                self.xaxis.get_transform(), self.yaxis.get_transform()))
        # hasattr guard: during __init__ this runs before cla() has
        # created self.lines.
        if hasattr(self, "lines"):
            for line in self.lines:
                # Cached transformed paths are now stale.
                line._transformed_path.invalidate()
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
    def set_position(self, pos, which='both'):
        """
        Set the axes position with::

          pos = [left, bottom, width, height]

        in relative 0,1 coords, or *pos* can be a
        :class:`~matplotlib.transforms.Bbox`

        There are two position variables: one which is ultimately
        used, but which may be modified by :meth:`apply_aspect`, and a
        second which is the starting point for :meth:`apply_aspect`.

        Optional keyword arguments:
          *which*

            ========== ====================
            value      description
            ========== ====================
            'active'   to change the first
            'original' to change the second
            'both'     to change both
            ========== ====================
        """
        if not isinstance(pos, mtransforms.BboxBase):
            pos = mtransforms.Bbox.from_bounds(*pos)
        # Update the active and/or original position per *which*.
        if which in ('both', 'active'):
            self._position.set(pos)
        if which in ('both', 'original'):
            self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
    def _set_artist_props(self, a):
        'set the boilerplate props for artists added to axes'
        a.set_figure(self.figure)
        # Only give the artist the data transform if the caller has not
        # already set an explicit one.
        if not a.is_transform_set():
            a.set_transform(self.transData)
        a.set_axes(self)
    def _gen_axes_patch(self):
        """
        Returns the patch used to draw the background of the axes.  It
        is also used as the clipping path for any data elements on the
        axes.

        In the standard axes, this is a rectangle, but in other
        projections it may not be.

        .. note::
            Intended to be overridden by new projection types.
        """
        # Unit rectangle in axes coordinates.
        return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
    def cla(self):
        'Clear the current axes'
        # Note: this is called by Axes.__init__()
        self.xaxis.cla()
        self.yaxis.cla()
        self.ignore_existing_data_limits = True
        self.callbacks = cbook.CallbackRegistry(('xlim_changed',
                                                 'ylim_changed'))
        if self._sharex is not None:
            # major and minor are class instances with
            # locator and formatter attributes
            self.xaxis.major = self._sharex.xaxis.major
            self.xaxis.minor = self._sharex.xaxis.minor
            x0, x1 = self._sharex.get_xlim()
            self.set_xlim(x0, x1, emit=False)
            self.xaxis.set_scale(self._sharex.xaxis.get_scale())
        else:
            self.xaxis.set_scale('linear')
        if self._sharey is not None:
            self.yaxis.major = self._sharey.yaxis.major
            self.yaxis.minor = self._sharey.yaxis.minor
            y0, y1 = self._sharey.get_ylim()
            self.set_ylim(y0, y1, emit=False)
            self.yaxis.set_scale(self._sharey.yaxis.get_scale())
        else:
            self.yaxis.set_scale('linear')
        self._autoscaleon = True
        self._update_transScale()         # needed?
        # Argument parsers for plot() and fill().
        self._get_lines = _process_plot_var_args(self)
        self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
        self._gridOn = rcParams['axes.grid']
        # Per-axes artist containers, all reset to empty.
        self.lines = []
        self.patches = []
        self.texts = []
        self.tables = []
        self.artists = []
        self.images = []
        self.legend_ = None
        self.collections = []  # collection.Collection instances
        self.grid(self._gridOn)
        props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
        # Title sits 5 points above the top of the axes.
        self.titleOffsetTrans = mtransforms.ScaledTranslation(
            0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
        self.title =  mtext.Text(
            x=0.5, y=1.0, text='',
            fontproperties=props,
            verticalalignment='bottom',
            horizontalalignment='center',
            )
        self.title.set_transform(self.transAxes + self.titleOffsetTrans)
        self.title.set_clip_box(None)
        self._set_artist_props(self.title)
        # the patch draws the background of the axes.  we want this to
        # be below the other artists; the axesPatch name is
        # deprecated.  We use the frame to draw the edges so we are
        # setting the edgecolor to None
        self.patch = self.axesPatch = self._gen_axes_patch()
        self.patch.set_figure(self.figure)
        self.patch.set_facecolor(self._axisbg)
        self.patch.set_edgecolor('None')
        self.patch.set_linewidth(0)
        self.patch.set_transform(self.transAxes)
        # the frame draws the border around the axes and we want this
        # above.  this is a place holder for a more sophisticated
        # artist that might just draw a left, bottom frame, or a
        # centered frame, etc the axesFrame name is deprecated
        self.frame = self.axesFrame = self._gen_axes_patch()
        self.frame.set_figure(self.figure)
        self.frame.set_facecolor('none')
        self.frame.set_edgecolor(rcParams['axes.edgecolor'])
        self.frame.set_linewidth(rcParams['axes.linewidth'])
        self.frame.set_transform(self.transAxes)
        self.frame.set_zorder(2.5)
        self.axison = True
        self.xaxis.set_clip_path(self.patch)
        self.yaxis.set_clip_path(self.patch)
        # Drop dead entries from the shared-axes groupers.
        self._shared_x_axes.clean()
        self._shared_y_axes.clean()
    def clear(self):
        'clear the axes'
        # Alias for cla().
        self.cla()
    def set_color_cycle(self, clist):
        """
        Set the color cycle for any future plot commands on this Axes.

        clist is a list of mpl color specifiers.
        """
        # Delegates to the line-argument parser, which owns the cycle.
        self._get_lines.set_color_cycle(clist)
    def ishold(self):
        'return the HOLD status of the axes'
        return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
    def get_aspect(self):
        # Current aspect setting: 'auto', 'equal' or a float ratio.
        return self._aspect
    def set_aspect(self, aspect, adjustable=None, anchor=None):
        """
        *aspect*

          ======== ================================================
          value    description
          ======== ================================================
          'auto'   automatic; fill position rectangle with data
          'normal' same as 'auto'; deprecated
          'equal'  same scaling from data to plot units for x and y
          num      a circle will be stretched such that the height
                   is num times the width. aspect=1 is the same as
                   aspect='equal'.
          ======== ================================================

        *adjustable*

          ========= ============================
          value     description
          ========= ============================
          'box'     change physical size of axes
          'datalim' change xlim or ylim
          ========= ============================

        *anchor*

          ===== =====================
          value description
          ===== =====================
          'C'   centered
          'SW'  lower left corner
          'S'   middle of bottom edge
          'SE'  lower right corner
          etc.
          ===== =====================
        """
        if aspect in ('normal', 'auto'):
            self._aspect = 'auto'
        elif aspect == 'equal':
            self._aspect = 'equal'
        else:
            # Numeric ratio; float() raises ValueError for bad input.
            self._aspect = float(aspect) # raise ValueError if necessary
        if adjustable is not None:
            self.set_adjustable(adjustable)
        if anchor is not None:
            self.set_anchor(anchor)
    def get_adjustable(self):
        # Either 'box' or 'datalim'; see set_adjustable.
        return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
    def get_anchor(self):
        # Anchor code, e.g. 'C', 'SW', ... ; see set_anchor.
        return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.BBox.coefs.keys()))
    def get_data_ratio(self):
        """
        Returns the aspect ratio of the raw data.

        This method is intended to be overridden by new projection
        types.
        """
        # 1e-30 floors guard against division by zero on degenerate
        # (empty-range) axes.
        xmin,xmax = self.get_xbound()
        xsize = max(math.fabs(xmax-xmin), 1e-30)
        ymin,ymax = self.get_ybound()
        ysize = max(math.fabs(ymax-ymin), 1e-30)
        return ysize/xsize
    def apply_aspect(self, position=None):
        '''
        Use :meth:`_aspect` and :meth:`_adjustable` to modify the
        axes box or the view limits.
        '''
        if position is None:
            position = self.get_position(original=True)
        aspect = self.get_aspect()
        if aspect == 'auto':
            # No aspect constraint: just restore the requested position.
            self.set_position( position , which='active')
            return
        if aspect == 'equal':
            A = 1
        else:
            A = aspect
        #Ensure at drawing time that any Axes involved in axis-sharing
        # does not have its position changed.
        if self in self._shared_x_axes or self in self._shared_y_axes:
            if self._adjustable == 'box':
                self._adjustable = 'datalim'
                warnings.warn(
                    'shared axes: "adjustable" is being changed to "datalim"')
        figW,figH = self.get_figure().get_size_inches()
        fig_aspect = figH/figW
        if self._adjustable == 'box':
            # Shrink the axes box itself to match the data aspect.
            box_aspect = A * self.get_data_ratio()
            pb  = position.frozen()
            pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
            self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
            return
        # reset active to original in case it had been changed
        # by prior use of 'box'
        self.set_position(position, which='active')
        xmin,xmax = self.get_xbound()
        xsize = max(math.fabs(xmax-xmin), 1e-30)
        ymin,ymax = self.get_ybound()
        ysize = max(math.fabs(ymax-ymin), 1e-30)
        l,b,w,h = position.bounds
        box_aspect = fig_aspect * (h/w)
        data_ratio = box_aspect / A
        y_expander = (data_ratio*xsize/ysize - 1.0)
        #print 'y_expander', y_expander
        # If y_expander > 0, the dy/dx viewLim ratio needs to increase
        if abs(y_expander) < 0.005:
            #print 'good enough already'
            return
        dL = self.dataLim
        xr = 1.05 * dL.width
        yr = 1.05 * dL.height
        xmarg = xsize - xr
        ymarg = ysize - yr
        Ysize = data_ratio * xsize
        Xsize = ysize / data_ratio
        Xmarg = Xsize - xr
        Ymarg = Ysize - yr
        xm = 0  # Setting these targets to, e.g., 0.05*xr does not seem to help.
        ym = 0
        #print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
        #print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
        # When one axis is shared we may only adjust the other one.
        changex = (self in self._shared_y_axes
                   and self not in self._shared_x_axes)
        changey = (self in self._shared_x_axes
                   and self not in self._shared_y_axes)
        if changex and changey:
            warnings.warn("adjustable='datalim' cannot work with shared "
                          "x and y axes")
            return
        if changex:
            adjust_y = False
        else:
            #print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
            if xmarg > xm and ymarg > ym:
                adjy = ((Ymarg > 0 and y_expander < 0)
                        or (Xmarg < 0 and y_expander > 0))
            else:
                adjy = y_expander > 0
            #print 'y_expander, adjy', y_expander, adjy
            adjust_y = changey or adjy  #(Ymarg > xmarg)
        if adjust_y:
            # Expand/contract the y view limits about their center.
            yc = 0.5*(ymin+ymax)
            y0 = yc - Ysize/2.0
            y1 = yc + Ysize/2.0
            self.set_ybound((y0, y1))
            #print 'New y0, y1:', y0, y1
            #print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
        else:
            # Otherwise adjust the x view limits about their center.
            xc = 0.5*(xmin+xmax)
            x0 = xc - Xsize/2.0
            x1 = xc + Xsize/2.0
            self.set_xbound((x0, x1))
            #print 'New x0, x1:', x0, x1
            #print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
    def axis(self, *v, **kwargs):
        '''
        Convenience method for manipulating the x and y view limits
        and the aspect ratio of the plot.

        Accepts a state string ('on', 'off', 'equal', 'tight', 'scaled',
        'normal', 'auto', 'image'), a 4-sequence [xmin, xmax, ymin, ymax],
        or no positional args (limits read/set from *kwargs*).

        *kwargs* are passed on to :meth:`set_xlim` and
        :meth:`set_ylim`
        '''
        if len(v)==1 and is_string_like(v[0]):
            s = v[0].lower()
            if s=='on': self.set_axis_on()
            elif s=='off': self.set_axis_off()
            elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
                self.set_autoscale_on(True)
                self.set_aspect('auto')
                self.autoscale_view()
                # self.apply_aspect()
                if s=='equal':
                    self.set_aspect('equal', adjustable='datalim')
                elif s == 'scaled':
                    self.set_aspect('equal', adjustable='box', anchor='C')
                    self.set_autoscale_on(False) # Req. by Mark Bakker
                elif s=='tight':
                    self.autoscale_view(tight=True)
                    self.set_autoscale_on(False)
                elif s == 'image':
                    # 'image' == tight limits + box-adjusted equal aspect.
                    self.autoscale_view(tight=True)
                    self.set_autoscale_on(False)
                    self.set_aspect('equal', adjustable='box', anchor='C')
            else:
                raise ValueError('Unrecognized string %s to axis; '
                                 'try on or off' % s)
            xmin, xmax = self.get_xlim()
            ymin, ymax = self.get_ylim()
            return xmin, xmax, ymin, ymax
        # No positional args: apply/read limits from keyword arguments.
        try: v[0]
        except IndexError:
            emit = kwargs.get('emit', True)
            xmin = kwargs.get('xmin', None)
            xmax = kwargs.get('xmax', None)
            xmin, xmax = self.set_xlim(xmin, xmax, emit)
            ymin = kwargs.get('ymin', None)
            ymax = kwargs.get('ymax', None)
            ymin, ymax = self.set_ylim(ymin, ymax, emit)
            return xmin, xmax, ymin, ymax
        # Otherwise a single 4-sequence of limits.
        v = v[0]
        if len(v) != 4:
            raise ValueError('v must contain [xmin xmax ymin ymax]')
        self.set_xlim([v[0], v[1]])
        self.set_ylim([v[2], v[3]])
        return v
    def get_child_artists(self):
        """
        Return a list of artists the axes contains.

        .. deprecated:: 0.98

        .. note:: this raises DeprecationWarning as an exception rather
           than issuing a warning.
        """
        raise DeprecationWarning('Use get_children instead')
    def get_frame(self):
        'Return the axes Rectangle frame'
        # Deprecated accessor; emits a warning and returns self.patch.
        warnings.warn('use ax.patch instead', DeprecationWarning)
        return self.patch
    def get_legend(self):
        'Return the legend.Legend instance, or None if no legend is defined'
        return self.legend_
    def get_images(self):
        'return a list of Axes images contained by the Axes'
        return cbook.silent_list('AxesImage', self.images)
    def get_lines(self):
        'Return a list of lines contained by the Axes'
        return cbook.silent_list('Line2D', self.lines)
    def get_xaxis(self):
        'Return the XAxis instance'
        return self.xaxis
    def get_xgridlines(self):
        'Get the x grid lines as a list of Line2D instances'
        return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
    def get_yaxis(self):
        'Return the YAxis instance'
        return self.yaxis
    def get_ygridlines(self):
        'Get the y grid lines as a list of Line2D instances'
        return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
    def get_yticklines(self):
        'Get the ytick lines as a list of Line2D instances'
        return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the axes'
        a.set_axes(self)
        self.artists.append(a)
        self._set_artist_props(a)
        a.set_clip_path(self.patch)
        # _remove_method lets artist.remove() detach itself later.
        a._remove_method = lambda h: self.artists.remove(h)
    def add_collection(self, collection, autolim=True):
        '''
        add a :class:`~matplotlib.collections.Collection` instance
        to the axes
        '''
        label = collection.get_label()
        if not label:
            # Auto-label so legends/pickers have something to show.
            collection.set_label('collection%d'%len(self.collections))
        self.collections.append(collection)
        self._set_artist_props(collection)
        collection.set_clip_path(self.patch)
        if autolim:
            # Only grow the data limits if the collection has paths.
            if collection._paths and len(collection._paths):
                self.update_datalim(collection.get_datalim(self.transData))
        collection._remove_method = lambda h: self.collections.remove(h)
    def add_line(self, line):
        '''
        Add a :class:`~matplotlib.lines.Line2D` to the list of plot
        lines
        '''
        self._set_artist_props(line)
        line.set_clip_path(self.patch)
        self._update_line_limits(line)
        if not line.get_label():
            line.set_label('_line%d'%len(self.lines))
        self.lines.append(line)
        line._remove_method = lambda h: self.lines.remove(h)
    def _update_line_limits(self, line):
        # Grow dataLim to include the line's path; x_isdata/y_isdata
        # skip coordinates that are in axes (not data) space.
        p = line.get_path()
        if p.vertices.size > 0:
            self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
                                            updatex=line.x_isdata,
                                            updatey=line.y_isdata)
            self.ignore_existing_data_limits = False
    def add_patch(self, p):
        """
        Add a :class:`~matplotlib.patches.Patch` *p* to the list of
        axes patches; the clipbox will be set to the Axes clipping
        box.  If the transform is not set, it will be set to
        :attr:`transData`.
        """
        self._set_artist_props(p)
        p.set_clip_path(self.patch)
        self._update_patch_limits(p)
        self.patches.append(p)
        p._remove_method = lambda h: self.patches.remove(h)
    def _update_patch_limits(self, patch):
        'update the data limits for patch *p*'
        # hist can add zero height Rectangles, which is useful to keep
        # the bins, counts and patches lined up, but it throws off log
        # scaling.  We'll ignore rects with zero height or width in
        # the auto-scaling
        if (isinstance(patch, mpatches.Rectangle) and
            (patch.get_width()==0 or patch.get_height()==0)):
            return
        vertices = patch.get_path().vertices
        if vertices.size > 0:
            xys = patch.get_patch_transform().transform(vertices)
            if patch.get_data_transform() != self.transData:
                # Map patch coordinates into data space before growing
                # the data limits.
                transform = (patch.get_data_transform() +
                             self.transData.inverted())
                xys = transform.transform(xys)
            self.update_datalim(xys, updatex=patch.x_isdata,
                                     updatey=patch.y_isdata)
    def add_table(self, tab):
        '''
        Add a :class:`~matplotlib.tables.Table` instance to the
        list of axes tables
        '''
        self._set_artist_props(tab)
        self.tables.append(tab)
        tab.set_clip_path(self.patch)
        tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of it's current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
    def update_datalim_numerix(self, x, y):
        """Update the data-limit bbox from separate *x* and *y* sequences.

        Empty input is a no-op.
        """
        # if no data is set currently, the bbox will ignore it's
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of it's current data
        # and the data in xydata
        if iterable(x) and not len(x): return
        self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
        self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
    def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
        """Look for unit-related *kwargs* and update the axis instances.

        *xdata*/*ydata* seed the unit framework with sample data so each
        axis can choose a converter; explicit ``xunits``/``yunits`` keys
        in *kwargs* (popped here) override units inferred from data.
        """
        if self.xaxis is None or self.yaxis is None: return
        #print 'processing', self.get_geometry()
        if xdata is not None:
            # we only need to update if there is nothing set yet.
            if not self.xaxis.have_units():
                self.xaxis.update_units(xdata)
            #print '\tset from xdata', self.xaxis.units
        if ydata is not None:
            # we only need to update if there is nothing set yet.
            if not self.yaxis.have_units():
                self.yaxis.update_units(ydata)
            #print '\tset from ydata', self.yaxis.units
        # process kwargs 2nd since these will override default units
        if kwargs is not None:
            xunits = kwargs.pop( 'xunits', self.xaxis.units)
            if xunits!=self.xaxis.units:
                #print '\tkw setting xunits', xunits
                self.xaxis.set_units(xunits)
                # If the units being set imply a different converter,
                # we need to update.
                if xdata is not None:
                    self.xaxis.update_units(xdata)
            yunits = kwargs.pop('yunits', self.yaxis.units)
            if yunits!=self.yaxis.units:
                #print '\tkw setting yunits', yunits
                self.yaxis.set_units(yunits)
                # If the units being set imply a different converter,
                # we need to update.
                if ydata is not None:
                    self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
    def autoscale_view(self, tight=False, scalex=True, scaley=True):
        """
        autoscale the view limits using the data limits.  You can
        selectively autoscale only a single axis, eg, the xaxis by
        setting *scaley* to *False*.  The autoscaling preserves any
        axis direction reversal that has already been done.
        """
        # if image data only just use the datalim
        if not self._autoscaleon: return
        if scalex:
            # take the union of data limits over all x-sharing siblings
            xshared = self._shared_x_axes.get_siblings(self)
            dl = [ax.dataLim for ax in xshared]
            bb = mtransforms.BboxBase.union(dl)
            x0, x1 = bb.intervalx
        if scaley:
            # same for y-sharing siblings
            yshared = self._shared_y_axes.get_siblings(self)
            dl = [ax.dataLim for ax in yshared]
            bb = mtransforms.BboxBase.union(dl)
            y0, y1 = bb.intervaly
        # tight mode (or image-only axes): use the data limits verbatim
        if (tight or (len(self.images)>0 and
                      len(self.lines)==0 and
                      len(self.patches)==0)):
            if scalex:
                self.set_xbound(x0, x1)
            if scaley:
                self.set_ybound(y0, y1)
            return
        # otherwise let each major locator expand to "nice" round limits
        if scalex:
            XL = self.xaxis.get_major_locator().view_limits(x0, x1)
            self.set_xbound(XL)
        if scaley:
            YL = self.yaxis.get_major_locator().view_limits(y0, y1)
            self.set_ybound(YL)
#### Drawing
    def draw(self, renderer=None, inframe=False):
        """Draw everything (plot lines, axes, labels).

        Falls back on the renderer cached by a previous draw when
        *renderer* is None; raises RuntimeError if neither is available.
        With *inframe* True the axis artists and title are skipped
        (used by redraw_in_frame for fast interactive updates).
        """
        if renderer is None:
            renderer = self._cachedRenderer
        if renderer is None:
            raise RuntimeError('No renderer defined')
        if not self.get_visible(): return
        renderer.open_group('axes')
        self.apply_aspect()
        # the patch draws the background rectangle -- the frame below
        # will draw the edges
        if self.axison and self._frameon:
            self.patch.draw(renderer)
        artists = []
        # a single image is drawn directly; multiple images are alpha
        # composited first unless the renderer opts out
        if len(self.images)<=1 or renderer.option_image_nocomposite():
            for im in self.images:
                im.draw(renderer)
        else:
            # make a composite image blending alpha
            # list of (mimage.Image, ox, oy)
            mag = renderer.get_image_magnification()
            ims = [(im.make_image(mag),0,0)
                   for im in self.images if im.get_visible()]
            l, b, r, t = self.bbox.extents
            width = mag*((round(r) + 0.5) - (round(l) - 0.5))
            height = mag*((round(t) + 0.5) - (round(b) - 0.5))
            im = mimage.from_images(height,
                                    width,
                                    ims)
            im.is_grayscale = False
            l, b, w, h = self.bbox.bounds
            # composite images need special args so they will not
            # respect z-order for now
            renderer.draw_image(
                round(l), round(b), im, self.bbox,
                self.patch.get_path(),
                self.patch.get_transform())
        artists.extend(self.collections)
        artists.extend(self.patches)
        artists.extend(self.lines)
        artists.extend(self.texts)
        artists.extend(self.artists)
        if self.axison and not inframe:
            # _axisbelow decides whether ticks/grid render under or over
            # the data artists
            if self._axisbelow:
                self.xaxis.set_zorder(0.5)
                self.yaxis.set_zorder(0.5)
            else:
                self.xaxis.set_zorder(2.5)
                self.yaxis.set_zorder(2.5)
            artists.extend([self.xaxis, self.yaxis])
        if not inframe: artists.append(self.title)
        artists.extend(self.tables)
        if self.legend_ is not None:
            artists.append(self.legend_)
        # the frame draws the edges around the axes patch -- we
        # decouple these so the patch can be in the background and the
        # frame in the foreground.
        if self.axison and self._frameon:
            artists.append(self.frame)
        # decorate-sort-undecorate on zorder; the index keeps ties stable
        # (animated artists are excluded -- they are drawn separately)
        dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
                if not a.get_animated() ]
        dsu.sort()
        for zorder, i, a in dsu:
            a.draw(renderer)
        renderer.close_group('axes')
        self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
    def __draw_animate(self):
        # ignore for now; broken
        # NOTE(review): legacy helper that redraws only the registered
        # animated artists in zorder using the last renderer; relies on
        # self.animated and self._lastRenderer, which do not appear to be
        # maintained elsewhere in this class -- confirm before reviving.
        if self._lastRenderer is None:
            raise RuntimeError('You must first call ax.draw()')
        # decorate-sort-undecorate on zorder
        dsu = [(a.zorder, a) for a in self.animated.keys()]
        dsu.sort()
        renderer = self._lastRenderer
        renderer.blit()
        for tmp, a in dsu:
            a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
    def grid(self, b=None, **kwargs):
        """
        call signature::

          grid(self, b=None, **kwargs)

        Set the axes grids on or off; *b* is a boolean.

        If *b* is *None* and ``len(kwargs)==0``, toggle the grid state.  If
        *kwargs* are supplied, it is assumed that you want a grid and *b*
        is thus set to *True*.

        *kwargs* are used to set the grid line properties, eg::

          ax.grid(color='r', linestyle='-', linewidth=2)

        Valid :class:`~matplotlib.lines.Line2D` kwargs are

        %(Line2D)s
        """
        # any line-property kwargs imply the grid should be on
        if len(kwargs): b = True
        self.xaxis.grid(b, **kwargs)
        self.yaxis.grid(b, **kwargs)
    grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
    def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
        """
        call signature::

          set_xlim(self, *args, **kwargs)

        Set the limits for the xaxis

        Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]

        Examples::

          set_xlim((valmin, valmax))
          set_xlim(valmin, valmax)
          set_xlim(xmin=1) # xmax unchanged
          set_xlim(xmax=1) # xmin unchanged

        Keyword arguments:

          *xmin*: scalar
            the min of the xlim
          *xmax*: scalar
            the max of the xlim
          *emit*: [ True | False ]
            notify observers of lim change

        ACCEPTS: len(2) sequence of floats
        """
        # allow set_xlim((a, b)) as well as set_xlim(a, b)
        if xmax is None and iterable(xmin):
            xmin,xmax = xmin
        # let unit-aware axes see the raw values, then convert
        self._process_unit_info(xdata=(xmin, xmax))
        if xmin is not None:
            xmin = self.convert_xunits(xmin)
        if xmax is not None:
            xmax = self.convert_xunits(xmax)
        old_xmin,old_xmax = self.get_xlim()
        if xmin is None: xmin = old_xmin
        if xmax is None: xmax = old_xmax
        # guard against degenerate (equal/reversed-degenerate) intervals
        # and clip to what the current scale supports (e.g. log > 0)
        xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
        xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
        self.viewLim.intervalx = (xmin, xmax)
        if emit:
            self.callbacks.process('xlim_changed', self)
            # Call all of the other x-axes that are shared with this one
            for other in self._shared_x_axes.get_siblings(self):
                if other is not self:
                    other.set_xlim(self.viewLim.intervalx, emit=False)
                    if (other.figure != self.figure and
                        other.figure.canvas is not None):
                        other.figure.canvas.draw_idle()
        return xmin, xmax
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
    def set_xscale(self, value, **kwargs):
        """
        call signature::

          set_xscale(value)

        Set the scaling of the x-axis: %(scale)s

        ACCEPTS: [%(scale)s]

        Different kwargs are accepted, depending on the scale:

        %(scale_docs)s
        """
        self.xaxis.set_scale(value, **kwargs)
        # a new scale changes which limits are valid -> rescale and
        # rebuild the scale part of the data transform
        self.autoscale_view()
        self._update_transScale()
    set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
        'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
        'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
    def get_xmajorticklabels(self):
        'Get the x-axis major tick labels as a list of Text instances'
        return cbook.silent_list('Text xticklabel',
                                 self.xaxis.get_majorticklabels())
    def get_xminorticklabels(self):
        'Get the x-axis minor tick labels as a list of Text instances'
        return cbook.silent_list('Text xticklabel',
                                 self.xaxis.get_minorticklabels())
    def get_xticklabels(self, minor=False):
        'Get the x tick labels (major, or minor if *minor*) as Text instances'
        return cbook.silent_list('Text xticklabel',
                                 self.xaxis.get_ticklabels(minor=minor))
    def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
        """
        call signature::

          set_xticklabels(labels, fontdict=None, minor=False, **kwargs)

        Set the xtick labels with list of strings *labels*.  Return a
        list of axis text instances.

        *kwargs* set the :class:`~matplotlib.text.Text` properties.
        Valid properties are
        %(Text)s

        ACCEPTS: sequence of strings
        """
        return self.xaxis.set_ticklabels(labels, fontdict,
                                         minor=minor, **kwargs)
    set_xticklabels.__doc__ = cbook.dedent(
        set_xticklabels.__doc__) % martist.kwdocd
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
    def set_yscale(self, value, **kwargs):
        """
        call signature::

          set_yscale(value)

        Set the scaling of the y-axis: %(scale)s

        ACCEPTS: [%(scale)s]

        Different kwargs are accepted, depending on the scale:

        %(scale_docs)s
        """
        self.yaxis.set_scale(value, **kwargs)
        # a new scale changes which limits are valid -> rescale and
        # rebuild the scale part of the data transform
        self.autoscale_view()
        self._update_transScale()
    set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
        'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
        'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
    def get_ymajorticklabels(self):
        'Get the y-axis major tick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_majorticklabels())
    def get_yminorticklabels(self):
        'Get the y-axis minor tick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_minorticklabels())
    def get_yticklabels(self, minor=False):
        'Get the y tick labels (major, or minor if *minor*) as Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_ticklabels(minor=minor))
    def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
        """
        call signature::

          set_yticklabels(labels, fontdict=None, minor=False, **kwargs)

        Set the ytick labels with list of strings *labels*.  Return a list of
        :class:`~matplotlib.text.Text` instances.

        *kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
        Valid properties are
        %(Text)s

        ACCEPTS: sequence of strings
        """
        return self.yaxis.set_ticklabels(labels, fontdict,
                                         minor=minor, **kwargs)
    set_yticklabels.__doc__ = cbook.dedent(
        set_yticklabels.__doc__) % martist.kwdocd
    def xaxis_date(self, tz=None):
        """Sets up x-axis ticks and labels that treat the x data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc
        value.  Installs a date locator/formatter on the x-axis unless
        one is already present.
        """
        xmin, xmax = self.dataLim.intervalx
        if xmin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for the datalim
            # have been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(xdata=(dmin, dmax))
            dmin, dmax = self.convert_xunits([dmin, dmax])
            self.viewLim.intervalx = dmin, dmax
            self.dataLim.intervalx = dmin, dmax
        locator = self.xaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.xaxis.set_major_locator(locator)
        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervalx[0]==0.:
            self.viewLim.intervalx = tuple(self.dataLim.intervalx)
        locator.refresh()
        formatter = self.xaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for the datalim
# have been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
    def start_pan(self, x, y, button):
        """
        Called when a pan operation has started.

        *x*, *y* are the mouse coordinates in display coords.
        button is the mouse button number:

        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT

        .. note::
            Intended to be overridden by new projection types.
        """
        # Snapshot the limits/transforms/bbox at pan start; drag_pan
        # computes new limits relative to this frozen state.
        self._pan_start = cbook.Bunch(
            lim           = self.viewLim.frozen(),
            trans         = self.transData.frozen(),
            trans_inverse = self.transData.inverted().frozen(),
            bbox          = self.bbox.frozen(),
            x             = x,
            y             = y
            )
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
    def drag_pan(self, button, key, x, y):
        """
        Called when the mouse moves during a pan operation.

        *button* is the mouse button number:

        * 1: LEFT (translate)
        * 2: MIDDLE
        * 3: RIGHT (zoom)

        *key* is a "shift" key

        *x*, *y* are the mouse coordinates in display coords.

        .. note::
            Intended to be overridden by new projection types.
        """
        def format_deltas(key, dx, dy):
            # Constrain the raw mouse deltas according to the modifier key:
            # 'control' forces equal deltas, 'x'/'y' lock one axis, 'shift'
            # snaps to the dominant axis when it clearly dominates.
            if key=='control':
                if(abs(dx)>abs(dy)):
                    dy = dx
                else:
                    dx = dy
            elif key=='x':
                dy = 0
            elif key=='y':
                dx = 0
            elif key=='shift':
                if 2*abs(dx) < abs(dy):
                    dx=0
                elif 2*abs(dy) < abs(dx):
                    dy=0
                elif(abs(dx)>abs(dy)):
                    dy=dy/abs(dy)*abs(dx)
                else:
                    dx=dx/abs(dx)*abs(dy)
            return (dx,dy)

        p = self._pan_start  # state frozen by start_pan()
        dx = x - p.x
        dy = y - p.y
        if dx == 0 and dy == 0:
            return
        if button == 1:
            # translate: shift the frozen bbox and map back to data coords
            dx, dy = format_deltas(key, dx, dy)
            result = p.bbox.translated(-dx, -dy) \
                .transformed(p.trans_inverse)
        elif button == 3:
            # zoom: scale the frozen limits about the pan-start point,
            # with a factor of 10**(normalized drag distance)
            try:
                dx = -dx / float(self.bbox.width)
                dy = -dy / float(self.bbox.height)
                dx, dy = format_deltas(key, dx, dy)
                if self.get_aspect() != 'auto':
                    # fixed aspect: zoom both directions by the same factor
                    dx = 0.5 * (dx + dy)
                    dy = dx
                alpha = np.power(10.0, (dx, dy))
                start = p.trans_inverse.transform_point((p.x, p.y))
                lim_points = p.lim.get_points()
                result = start + alpha * (lim_points - start)
                result = mtransforms.Bbox(result)
            except OverflowError:
                warnings.warn('Overflow while panning')
                return
        self.set_xlim(*result.intervalx)
        self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor propertiess as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
    def set_cursor_props(self, *args):
        """
        Set the cursor property as::

          ax.set_cursor_props(linewidth, color)

        or::

          ax.set_cursor_props((linewidth, color))

        ACCEPTS: a (*float*, *color*) tuple

        Raises ValueError for any other argument arity.
        """
        if len(args)==1:
            lw, c = args[0]
        elif len(args)==2:
            lw, c = args
        else:
            raise ValueError('args must be a (linewidth, color) tuple')
        # normalize the color to RGBA before storing
        c =mcolors.colorConverter.to_rgba(c)
        self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. Register
with callback functions with the following signatures. The function
has the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
    def pick(self, *args):
        """
        call signature::

          pick(mouseevent)

        each child artist will fire a pick event if mouseevent is over
        the artist and the artist has picker set

        Raises DeprecationWarning when called with more than one
        positional argument (the old pick API).
        """
        if len(args)>1:
            raise DeprecationWarning('New pick API implemented -- '
                                     'see API_CHANGES in the src distribution')
        # delegate to the Artist base implementation
        martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(test, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
    def set_title(self, label, fontdict=None, **kwargs):
        """
        call signature::

          set_title(label, fontdict=None, **kwargs):

        Set the title for the axes.  Returns the title Text instance.

        kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        .. seealso::
            :meth:`text`:
                for information on how override and the optional args work
        """
        default = {
            'fontsize':rcParams['axes.titlesize'],
            'verticalalignment' : 'bottom',
            'horizontalalignment' : 'center'
            }
        self.title.set_text(label)
        # apply defaults first so fontdict/kwargs can override them
        self.title.update(default)
        if fontdict is not None: self.title.update(fontdict)
        self.title.update(kwargs)
        return self.title
    set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
    def set_xlabel(self, xlabel, fontdict=None, **kwargs):
        """
        call signature::

          set_xlabel(xlabel, fontdict=None, **kwargs)

        Set the label for the xaxis.  Returns the label Text instance.

        Valid kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        .. seealso::
            :meth:`text`:
                for information on how override and the optional args work
        """
        label = self.xaxis.get_label()
        label.set_text(xlabel)
        # fontdict is applied before kwargs so kwargs win
        if fontdict is not None: label.update(fontdict)
        label.update(kwargs)
        return label
    set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
    def set_ylabel(self, ylabel, fontdict=None, **kwargs):
        """
        call signature::

          set_ylabel(ylabel, fontdict=None, **kwargs)

        Set the label for the yaxis.  Returns the label Text instance.

        Valid kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        .. seealso::
            :meth:`text`:
                for information on how override and the optional args work
        """
        label = self.yaxis.get_label()
        label.set_text(ylabel)
        # fontdict is applied before kwargs so kwargs win
        if fontdict is not None: label.update(fontdict)
        label.update(kwargs)
        return label
    set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
    def text(self, x, y, s, fontdict=None,
             withdash=False, **kwargs):
        """
        call signature::

          text(x, y, s, fontdict=None, **kwargs)

        Add text in string *s* to axis at location *x*, *y*, data
        coordinates.  Returns the created Text instance.

        Keyword arguments:

          *fontdict*:
            A dictionary to override the default text properties.
            If *fontdict* is *None*, the defaults are determined by your rc
            parameters.

          *withdash*: [ False | True ]
            Creates a :class:`~matplotlib.text.TextWithDash` instance
            instead of a :class:`~matplotlib.text.Text` instance.

        Individual keyword arguments can be used to override any given
        parameter::

          text(x, y, s, fontsize=12)

        The default transform specifies that text is in data coords,
        alternatively, you can specify text in axis coords (0,0 is
        lower-left and 1,1 is upper-right).  The example below places
        text in the center of the axes::

          text(0.5, 0.5,'matplotlib',
               horizontalalignment='center',
               verticalalignment='center',
               transform = ax.transAxes)

        You can put a rectangular box around the text instance (eg. to
        set a background color) by using the keyword *bbox*.  *bbox* is
        a dictionary of :class:`matplotlib.patches.Rectangle`
        properties.  For example::

          text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))

        Valid kwargs are :class:`matplotlib.text.Text` properties:

        %(Text)s
        """
        default = {
            'verticalalignment' : 'bottom',
            'horizontalalignment' : 'left',
            #'verticalalignment' : 'top',
            'transform' : self.transData,
            }

        # At some point if we feel confident that TextWithDash
        # is robust as a drop-in replacement for Text and that
        # the performance impact of the heavier-weight class
        # isn't too significant, it may make sense to eliminate
        # the withdash kwarg and simply delegate whether there's
        # a dash to TextWithDash and dashlength.
        if withdash:
            t = mtext.TextWithDash(
                x=x, y=y, text=s,
                )
        else:
            t = mtext.Text(
                x=x, y=y, text=s,
                )
        self._set_artist_props(t)

        # defaults first, then fontdict, then kwargs (last one wins)
        t.update(default)
        if fontdict is not None: t.update(fontdict)
        t.update(kwargs)
        self.texts.append(t)
        t._remove_method = lambda h: self.texts.remove(h)

        #if t.get_clip_on():  t.set_clip_box(self.bbox)
        if 'clip_on' in kwargs:  t.set_clip_box(self.bbox)
        return t
    text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *y* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError, 'xmin and y are unequal sized sequences'
if len(xmax)!=len(y):
raise ValueError, 'xmax and y are unequal sized sequences'
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, color='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError, 'ymin and x are unequal sized sequences'
if len(ymax)!=len(x):
raise ValueError, 'ymax and x are unequal sized sequences'
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
#### Basic plotting
    def plot(self, *args, **kwargs):
        """
        Plot lines and/or markers to the
        :class:`~matplotlib.axes.Axes`.  *args* is a variable length
        argument, allowing for multiple *x*, *y* pairs with an
        optional format string.  For example, each of the following is
        legal::

            plot(x, y)         # plot x and y using default line style and color
            plot(x, y, 'bo')   # plot x and y using blue circle markers
            plot(y)            # plot y using x as index array 0..N-1
            plot(y, 'r+')      # ditto, but with red plusses

        If *x* and/or *y* is 2-dimensional, then the corresponding columns
        will be plotted.

        An arbitrary number of *x*, *y*, *fmt* groups can be
        specified, as in::

            a.plot(x1, y1, 'g^', x2, y2, 'g-')

        Return value is a list of lines that were added.

        The following format string characters are accepted to control
        the line style or marker:

        ================    ===============================
        character           description
        ================    ===============================
        '-'                 solid line style
        '--'                dashed line style
        '-.'                dash-dot line style
        ':'                 dotted line style
        '.'                 point marker
        ','                 pixel marker
        'o'                 circle marker
        'v'                 triangle_down marker
        '^'                 triangle_up marker
        '<'                 triangle_left marker
        '>'                 triangle_right marker
        '1'                 tri_down marker
        '2'                 tri_up marker
        '3'                 tri_left marker
        '4'                 tri_right marker
        's'                 square marker
        'p'                 pentagon marker
        '*'                 star marker
        'h'                 hexagon1 marker
        'H'                 hexagon2 marker
        '+'                 plus marker
        'x'                 x marker
        'D'                 diamond marker
        'd'                 thin_diamond marker
        '|'                 vline marker
        '_'                 hline marker
        ================    ===============================

        The following color abbreviations are supported:

        ==========  ========
        character   color
        ==========  ========
        'b'         blue
        'g'         green
        'r'         red
        'c'         cyan
        'm'         magenta
        'y'         yellow
        'k'         black
        'w'         white
        ==========  ========

        In addition, you can specify colors in many weird and
        wonderful ways, including full names (``'green'``), hex
        strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
        grayscale intensities as a string (``'0.8'``).  Of these, the
        string specifications can be used in place of a ``fmt`` group,
        but the tuple forms can be used only as ``kwargs``.

        Line styles and colors are combined in a single format string, as in
        ``'bo'`` for blue circles.

        The *kwargs* can be used to set line properties (any property that has
        a ``set_*`` method).  You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc.  Here is an
        example::

            plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
            plot([1,2,3], [1,4,9], 'rs',  label='line 2')
            axis([0, 4, 0, 10])
            legend()

        If you make multiple lines with one plot command, the kwargs
        apply to all those lines, e.g.::

            plot(x1, y1, x2, y2, antialiased=False)

        Neither line will be antialiased.

        You do not need to use format strings, which are just
        abbreviations.  All of the line properties can be controlled
        by keyword arguments.  For example, you can set the color,
        marker, linestyle, and markercolor with::

            plot(x, y, color='green', linestyle='dashed', marker='o',
                 markerfacecolor='blue', markersize=12).  See
                 :class:`~matplotlib.lines.Line2D` for details.

        The kwargs are :class:`~matplotlib.lines.Line2D` properties:

        %(Line2D)s

        kwargs *scalex* and *scaley*, if defined, are passed on to
        :meth:`~matplotlib.axes.Axes.autoscale_view` to determine
        whether the *x* and *y* axes are autoscaled; the default is
        *True*.
        """
        # Pop the autoscaling switches before the remaining kwargs are
        # handed to the line factory as Line2D properties.
        scalex = kwargs.pop( 'scalex', True)
        scaley = kwargs.pop( 'scaley', True)

        if not self._hold: self.cla()
        lines = []

        # _get_lines parses the (x, y, fmt) groups and yields Line2D objects.
        for line in self._get_lines(*args, **kwargs):
            self.add_line(line)
            lines.append(line)

        self.autoscale_view(scalex=scalex, scaley=scaley)
        return lines
    plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pylab.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
    def acorr(self, x, **kwargs):
        """
        call signature::

            acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
                  maxlags=None, **kwargs)

        Plot the autocorrelation of *x*.  If *normed* = *True*,
        normalize the data by the autocorrelation at 0-th lag.  *x* is
        detrended by the *detrend* callable (default no normalization).

        Data are plotted as ``plot(lags, c, **kwargs)``

        Return value is a tuple (*lags*, *c*, *line*) where:

          - *lags* are a length 2*maxlags+1 lag vector

          - *c* is the 2*maxlags+1 auto correlation vector

          - *line* is a :class:`~matplotlib.lines.Line2D` instance
            returned by :meth:`plot`

        The default *linestyle* is None and the default *marker* is
        ``'o'``, though these can be overridden with keyword args.
        The cross correlation is performed with
        :func:`numpy.correlate` with *mode* = 2.

        If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
        rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
        vertical lines from the origin to the acorr.  Otherwise, the
        plot style is determined by the kwargs, which are
        :class:`~matplotlib.lines.Line2D` properties.

        *maxlags* is a positive integer detailing the number of lags
        to show.  The default value of *None* will return all
        :math:`2 \mathrm{len}(x) - 1` lags.

        The return value is a tuple (*lags*, *c*, *linecol*, *b*)
        where

          - *linecol* is the
            :class:`~matplotlib.collections.LineCollection`

          - *b* is the *x*-axis.

        .. seealso::
            :meth:`~matplotlib.axes.Axes.plot` or
            :meth:`~matplotlib.axes.Axes.vlines`: For documentation on
            valid kwargs.

        **Example:**

        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.

        **Example:**

        .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
        """
        # Autocorrelation is the cross correlation of x with itself; all
        # keyword handling (normed, detrend, usevlines, ...) lives in xcorr.
        return self.xcorr(x, x, **kwargs)
    acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and y are detrended by the *detrend* callable
(default no normalization). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maglags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
    def legend(self, *args, **kwargs):
        """
        call signature::

          legend(*args, **kwargs)

        Place a legend on the current axes at location *loc*.  Labels are a
        sequence of strings and *loc* can be a string or an integer specifying
        the legend location.

        To make a legend with existing lines::

          legend()

        :meth:`legend` by itself will try and build a legend using the label
        property of the lines/patches/collections.  You can set the label of
        a line by doing::

          plot(x, y, label='my data')

        or::

          line.set_label('my data').

        If label is set to '_nolegend_', the item will not be shown in
        legend.

        To automatically generate the legend from labels::

          legend( ('label1', 'label2', 'label3') )

        To make a legend for a list of lines and labels::

          legend( (line1, line2, line3), ('label1', 'label2', 'label3') )

        To make a legend at a given location, using a location argument::

          legend( ('label1', 'label2', 'label3'), loc='upper left')

        or::

          legend( (line1, line2, line3),  ('label1', 'label2', 'label3'), loc=2)

        The location codes are

          ===============   =============
          Location String   Location Code
          ===============   =============
          'best'            0
          'upper right'     1
          'upper left'      2
          'lower left'      3
          'lower right'     4
          'right'           5
          'center left'     6
          'center right'    7
          'lower center'    8
          'upper center'    9
          'center'          10
          ===============   =============

        If none of these are locations are suitable, loc can be a 2-tuple
        giving x,y in axes coords, ie::

          loc = 0, 1 # left top

          loc = 0.5, 0.5 # center

        Keyword arguments:

          *isaxes*: [ True | False ]
            Indicates that this is an axes legend

          *numpoints*: integer
            The number of points in the legend line, default is 4

          *prop*: [ None | FontProperties ]
            A :class:`matplotlib.font_manager.FontProperties`
            instance, or *None* to use rc settings.

          *pad*: [ None | scalar ]
            The fractional whitespace inside the legend border, between 0 and 1.
            If *None*, use rc settings.

          *markerscale*: [ None | scalar ]
            The relative size of legend markers vs. original. If *None*, use rc
            settings.

          *shadow*: [ None | False | True ]
            If *True*, draw a shadow behind legend. If *None*, use rc settings.

          *labelsep*: [ None | scalar ]
            The vertical space between the legend entries. If *None*, use rc
            settings.

          *handlelen*: [ None | scalar ]
            The length of the legend lines. If *None*, use rc settings.

          *handletextsep*: [ None | scalar ]
            The space between the legend line and legend text. If *None*, use rc
            settings.

          *axespad*: [ None | scalar ]
            The border between the axes and legend edge. If *None*, use rc
            settings.

        **Example:**

        .. plot:: mpl_examples/api/legend_demo.py
        """

        def get_handles():
            # Candidate artists for the legend: plain lines, patches, and
            # only the collection types Legend knows how to draw.
            handles = self.lines[:]
            handles.extend(self.patches)
            handles.extend([c for c in self.collections
                            if isinstance(c, mcoll.LineCollection)])
            handles.extend([c for c in self.collections
                            if isinstance(c, mcoll.RegularPolyCollection)])
            return handles

        # Dispatch on the number of positional args; the supported call
        # forms are (), (labels,), (labels, loc), (handles, labels) and
        # (handles, labels, loc).
        if len(args)==0:
            handles = []
            labels = []
            for handle in get_handles():
                label = handle.get_label()
                if (label is not None and
                    label != '' and not label.startswith('_')):
                    handles.append(handle)
                    labels.append(label)
            if len(handles) == 0:
                warnings.warn("No labeled objects found. "
                              "Use label='...' kwarg on individual plots.")
                return None

        elif len(args)==1:
            # LABELS
            labels = args[0]
            handles = [h for h, label in zip(get_handles(), labels)]

        elif len(args)==2:
            if is_string_like(args[1]) or isinstance(args[1], int):
                # LABELS, LOC
                labels, loc = args
                handles = [h for h, label in zip(get_handles(), labels)]
                kwargs['loc'] = loc
            else:
                # LINES, LABELS
                handles, labels = args

        elif len(args)==3:
            # LINES, LABELS, LOC
            handles, labels, loc = args
            kwargs['loc'] = loc
        else:
            raise TypeError('Invalid arguments to legend')

        handles = cbook.flatten(handles)
        self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
        return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError, 'invalid orientation: %s' % orientation
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars
assert len(height)==nbars, ("argument 'height' must be %d or scalar" %
nbars)
assert len(width)==nbars, ("argument 'width' must be %d or scalar" %
nbars)
assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" %
nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError, 'invalid alignment: %s' % align
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
    """
    call signature::

      barh(bottom, width, height=0.8, left=0, **kwargs)

    Make a horizontal bar plot whose rectangles are bounded by:

      *left*, *left* + *width*, *bottom*, *bottom* + *height*
            (left, right, bottom and top edges)

    Each of *bottom*, *width*, *height* and *left* may be a scalar
    or a sequence.

    Return value is a list of
    :class:`matplotlib.patches.Rectangle` instances.

    Required arguments:

      ======== ======================================================
      Argument Description
      ======== ======================================================
      *bottom* the vertical positions of the bottom edges of the bars
      *width*  the lengths of the bars
      ======== ======================================================

    Optional keyword arguments:

      =============== ==========================================
      Keyword         Description
      =============== ==========================================
      *height*        the heights (thicknesses) of the bars
      *left*          the x coordinates of the left edges of the
                      bars
      *color*         the colors of the bars
      *edgecolor*     the colors of the bar edges
      *linewidth*     width of bar edges; None means use default
                      linewidth; 0 means don't draw edges.
      *xerr*          if not None, will be used to generate
                      errorbars on the bar chart
      *yerr*          if not None, will be used to generate
                      errorbars on the bar chart
      *ecolor*        specifies the color of any errorbar
      *capsize*       (default 3) determines the length in
                      points of the error bar caps
      *align*         'edge' (default) | 'center'
      *log*           [False|True] False (default) leaves the
                      horizontal axis as-is; True sets it to log
                      scale
      =============== ==========================================

    Setting *align* = 'edge' aligns bars by their bottom edges in
    bottom, while *align* = 'center' interprets these values as
    the *y* coordinates of the bar centers.

    The optional arguments *color*, *edgecolor*, *linewidth*,
    *xerr*, and *yerr* can be either scalars or sequences of
    length equal to the number of bars, so barh can serve as the
    basis for stacked bar charts or candlestick plots.

    other optional kwargs:

    %(Rectangle)s
    """
    # A horizontal bar chart is just a vertical one with the roles of
    # the two axes exchanged, so delegate all real work to bar().
    return self.bar(left=left, height=height, width=width, bottom=bottom,
                    orientation='horizontal', **kwargs)
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
    """
    call signature::

      broken_barh(self, xranges, yrange, **kwargs)

    Plot a set of horizontal bars, one for each (*xmin*, *xwidth*)
    pair in *xranges*, all sharing the vertical extent *yrange* =
    (*ymin*, *ywidth*).

    Required arguments:

      =========   ==============================
      Argument    Description
      =========   ==============================
      *xranges*   sequence of (*xmin*, *xwidth*)
      *yrange*    sequence of (*ymin*, *ywidth*)
      =========   ==============================

    kwargs are
    :class:`matplotlib.collections.BrokenBarHCollection`
    properties:

    %(BrokenBarHCollection)s

    Each property can be supplied either as a single value, ie::

      facecolors = 'black'

    or as a sequence giving one value per bar, ie::

      facecolors = ('black', 'red', 'green')

    **Example:**

    .. plot:: mpl_examples/pylab_examples/broken_barh.py
    """
    # All the bars live in one collection; autolim=True makes the
    # collection contribute to the data limits for autoscaling.
    bars = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
    self.add_collection(bars, autolim=True)
    self.autoscale_view()
    return bars
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
    """
    call signature::

      stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')

    Create a stem plot: a vertical line (styled with *linefmt*) is
    drawn at each *x* location from the baseline up to the
    corresponding *y* value, and a marker (styled with *markerfmt*)
    is placed at its tip.  A horizontal baseline at 0 is drawn
    using *basefmt*.

    Return value is the tuple (*markerline*, *stemlines*,
    *baseline*).

    .. seealso::
        `this document`__ for details
        :file:`examples/pylab_examples/stem_plot.py`:
            for a demo

    __ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
    """
    # Temporarily force hold on so the markers, stems and baseline
    # all land on the same axes; restore the caller's setting after.
    previous_hold = self._hold
    if not self._hold:
        self.cla()
    self.hold(True)

    markerline, = self.plot(x, y, markerfmt)

    # One Line2D per stem, running from the baseline (0) to the data
    # value.  plot() returns a one-element list; keep that element.
    stemlines = [self.plot([xi, xi], [0, yi], linefmt)[0]
                 for xi, yi in zip(x, y)]

    # The baseline spans the full x extent of the data.
    baseline, = self.plot([np.amin(x), np.amax(x)], [0, 0], basefmt)

    self.hold(previous_hold)
    return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
        autopct=None, pctdistance=0.6, shadow=False,
        labeldistance=1.1):
    r"""
    call signature::

      pie(x, explode=None, labels=None,
          colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
          autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)

    Make a pie chart of array *x*.  The fractional area of each
    wedge is given by x/sum(x).  If sum(x) <= 1, then the values
    of x give the fractional area directly and the array will not
    be normalized.

    Keyword arguments:

      *explode*: [ None | len(x) sequence ]
        If not *None*, is a len(*x*) array which specifies the
        fraction of the radius with which to offset each wedge.

      *colors*: [ None | color sequence ]
        A sequence of matplotlib color args through which the pie chart
        will cycle.

      *labels*: [ None | len(x) sequence of strings ]
        A sequence of strings providing the labels for each wedge

      *autopct*: [ None | format string | format function ]
        If not *None*, is a string or function used to label the
        wedges with their numeric value.  The label will be placed inside
        the wedge.  If it is a format string, the label will be ``fmt%pct``.
        If it is a function, it will be called.

      *pctdistance*: scalar
        The ratio between the center of each pie slice and the
        start of the text generated by *autopct*.  Ignored if
        *autopct* is *None*; default is 0.6.

      *labeldistance*: scalar
        The radial distance at which the pie labels are drawn

      *shadow*: [ False | True ]
        Draw a shadow beneath the pie.

    The pie chart will probably look best if the figure and axes are
    square.  Eg.::

      figure(figsize=(8,8))
      ax = axes([0.1, 0.1, 0.8, 0.8])

    Return value:
      If *autopct* is None, return the tuple (*patches*, *texts*):

        - *patches* is a sequence of
          :class:`matplotlib.patches.Wedge` instances

        - *texts* is a list of the label
          :class:`matplotlib.text.Text` instances.

      If *autopct* is not *None*, return the tuple (*patches*,
      *texts*, *autotexts*), where *patches* and *texts* are as
      above, and *autotexts* is a list of
      :class:`~matplotlib.text.Text` instances for the numeric
      labels.
    """
    # Pies are drawn without the usual axes frame.
    self.set_frame_on(False)

    x = np.asarray(x).astype(np.float32)

    # Normalize to fractions only when the sum exceeds 1; otherwise the
    # caller's values are already fractional areas.
    sx = float(x.sum())
    if sx>1: x = np.divide(x,sx)

    if labels is None: labels = ['']*len(x)
    if explode is None: explode = [0]*len(x)
    assert(len(x)==len(labels))
    assert(len(x)==len(explode))
    if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')

    center = 0,0
    radius = 1
    # theta1/theta2 are the accumulated wedge angles as fractions of a
    # full turn; they are converted to degrees when the Wedge is built.
    theta1 = 0
    i = 0
    texts = []
    slices = []
    autotexts = []
    # NOTE: x is rebound below as a coordinate; safezip has already
    # captured the data array, so iteration is unaffected.
    for frac, label, expl in cbook.safezip(x,labels, explode):
        x, y = center
        theta2 = theta1 + frac
        # Mid-angle of this wedge, in radians; used for exploding and
        # for positioning the labels.
        thetam = 2*math.pi*0.5*(theta1+theta2)
        x += expl*math.cos(thetam)
        y += expl*math.sin(thetam)

        w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
                  facecolor=colors[i%len(colors)])
        slices.append(w)
        self.add_patch(w)
        w.set_label(label)

        if shadow:
            # make sure to add a shadow after the call to
            # add_patch so the figure and transform props will be
            # set
            shad = mpatches.Shadow(w, -0.02, -0.02,
                          #props={'facecolor':w.get_facecolor()}
                          )
            # Draw the shadow just below its wedge.
            shad.set_zorder(0.9*w.get_zorder())
            self.add_patch(shad)

        # Wedge label, placed radially outward along the mid-angle.
        xt = x + labeldistance*radius*math.cos(thetam)
        yt = y + labeldistance*radius*math.sin(thetam)
        # Anchor the text on the side facing the pie.
        label_alignment = xt > 0 and 'left' or 'right'

        t = self.text(xt, yt, label,
                      size=rcParams['xtick.labelsize'],
                      horizontalalignment=label_alignment,
                      verticalalignment='center')

        texts.append(t)

        if autopct is not None:
            # Percentage text, placed inside the wedge.
            xt = x + pctdistance*radius*math.cos(thetam)
            yt = y + pctdistance*radius*math.sin(thetam)
            if is_string_like(autopct):
                s = autopct%(100.*frac)
            elif callable(autopct):
                s = autopct(100.*frac)
            else:
                raise TypeError(
                    'autopct must be callable or a format string')

            t = self.text(xt, yt, s,
                          horizontalalignment='center',
                          verticalalignment='center')
            autotexts.append(t)

        theta1 = theta2
        i += 1

    # Fixed, tick-free square view so the pie keeps its shape.
    self.set_xlim((-1.25, 1.25))
    self.set_ylim((-1.25, 1.25))

    self.set_xticks([])
    self.set_yticks([])

    if autopct is None: return slices, texts
    else: return slices, texts, autotexts
def errorbar(self, x, y, yerr=None, xerr=None,
             fmt='-', ecolor=None, elinewidth=None, capsize=3,
             barsabove=False, lolims=False, uplims=False,
             xlolims=False, xuplims=False, **kwargs):
    """
    call signature::

      errorbar(x, y, yerr=None, xerr=None,
               fmt='-', ecolor=None, elinewidth=None, capsize=3,
               barsabove=False, lolims=False, uplims=False,
               xlolims=False, xuplims=False)

    Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
    Vertical errorbars are plotted if *yerr* is not *None*.
    Horizontal errorbars are plotted if *xerr* is not *None*.

    *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
    single error bar at *x*, *y*.

    Optional keyword arguments:

      *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ]
        If a scalar number, len(N) array-like object, or an Nx1 array-like
        object, errorbars are drawn +/- value.

        If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and
        +column2

      *fmt*: '-'
        The plot format symbol for *y*.  If *fmt* is *None*, just plot the
        errorbars with no line symbols.  This can be useful for creating a
        bar plot with errorbars.

      *ecolor*: [ None | mpl color ]
        a matplotlib color arg which gives the color the errorbar lines; if
        *None*, use the marker color.

      *elinewidth*: scalar
        the linewidth of the errorbar lines. If *None*, use the linewidth.

      *capsize*: scalar
        the size of the error bar caps in points

      *barsabove*: [ True | False ]
        if *True*, will plot the errorbars above the plot
        symbols. Default is below.

      *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
        These arguments can be used to indicate that a value gives
        only upper/lower limits. In that case a caret symbol is
        used to indicate this. lims-arguments may be of the same
        type as *xerr* and *yerr*.

    All other keyword arguments are passed on to the plot command for the
    markers, so you can add additional key=value pairs to control the
    errorbar markers.  For example, this code makes big red squares with
    thick green edges::

      x,y,yerr = rand(3,10)
      errorbar(x, y, yerr, marker='s',
               mfc='red', mec='green', ms=20, mew=4)

    where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
    property names, *markerfacecolor*, *markeredgecolor*, *markersize*
    and *markeredgewith*.

    valid kwargs for the marker properties are

    %(Line2D)s

    Return value is a length 3 tuple.  The first element is the
    :class:`~matplotlib.lines.Line2D` instance for the *y* symbol
    lines.  The second element is a list of error bar cap lines,
    the third element is a list of
    :class:`~matplotlib.collections.LineCollection` instances for
    the horizontal and vertical error ranges.

    **Example:**

    .. plot:: mpl_examples/pylab_examples/errorbar_demo.py
    """
    self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
    if not self._hold: self.cla()
    # make sure all the args are iterable; use lists not arrays to
    # preserve units
    if not iterable(x):
        x = [x]

    if not iterable(y):
        y = [y]

    # Broadcast scalar errors to one value per data point.
    if xerr is not None:
        if not iterable(xerr):
            xerr = [xerr]*len(x)

    if yerr is not None:
        if not iterable(yerr):
            yerr = [yerr]*len(y)

    # l0 is the main data line/markers; drawn here only when it must
    # sit *below* the error bars (barsabove), otherwise drawn at the end.
    l0 = None

    if barsabove and fmt is not None:
        l0, = self.plot(x,y,fmt,**kwargs)

    barcols = []    # LineCollections for the error ranges
    caplines = []   # Line2D instances for the caps / limit carets

    # Keyword args for the hlines/vlines error-range calls; pick up an
    # explicit elinewidth, else inherit the marker linewidth kwargs.
    lines_kw = {'label':'_nolegend_'}
    if elinewidth:
        lines_kw['linewidth'] = elinewidth
    else:
        if 'linewidth' in kwargs:
            lines_kw['linewidth']=kwargs['linewidth']
        if 'lw' in kwargs:
            lines_kw['lw']=kwargs['lw']
    if 'transform' in kwargs:
        lines_kw['transform'] = kwargs['transform']

    # arrays fine here, they are booleans and hence not units
    if not iterable(lolims):
        lolims = np.asarray([lolims]*len(x), bool)
    else: lolims = np.asarray(lolims, bool)

    if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
    else: uplims = np.asarray(uplims, bool)

    if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
    else: xlolims = np.asarray(xlolims, bool)

    if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
    else: xuplims = np.asarray(xuplims, bool)

    def xywhere(xs, ys, mask):
        """
        return xs[mask], ys[mask] where mask is True but xs and
        ys are not arrays
        """
        assert len(xs)==len(ys)
        assert len(xs)==len(mask)
        xs = [thisx for thisx, b in zip(xs, mask) if b]
        ys = [thisy for thisy, b in zip(ys, mask) if b]
        return xs, ys

    if capsize > 0:
        # Caps are drawn as markers ('k|' / 'k_') sized from capsize.
        plot_kw = {
            'ms':2*capsize,
            'label':'_nolegend_'}
        if 'markeredgewidth' in kwargs:
            plot_kw['markeredgewidth']=kwargs['markeredgewidth']
        if 'mew' in kwargs:
            plot_kw['mew']=kwargs['mew']
        if 'transform' in kwargs:
            plot_kw['transform'] = kwargs['transform']

    if xerr is not None:
        # A 2xN xerr supplies separate minus (row 0) / plus (row 1) deltas.
        if (iterable(xerr) and len(xerr)==2 and
            iterable(xerr[0]) and iterable(xerr[1])):
            # using list comps rather than arrays to preserve units
            left  = [thisx-thiserr for (thisx, thiserr)
                     in cbook.safezip(x,xerr[0])]
            right  = [thisx+thiserr for (thisx, thiserr)
                      in cbook.safezip(x,xerr[1])]
        else:
            # using list comps rather than arrays to preserve units
            left  = [thisx-thiserr for (thisx, thiserr)
                     in cbook.safezip(x,xerr)]
            right  = [thisx+thiserr for (thisx, thiserr)
                      in cbook.safezip(x,xerr)]

        barcols.append( self.hlines(y, left, right, **lines_kw ) )
        if capsize > 0:
            if xlolims.any():
                # can't use numpy logical indexing since left and
                # y are lists
                leftlo, ylo = xywhere(left, y, xlolims)

                # Lower-limit points get a caret instead of a cap ...
                caplines.extend(
                    self.plot(leftlo, ylo, ls='None',
                              marker=mlines.CARETLEFT, **plot_kw) )
                # ... and the remaining points get the normal cap.
                xlolims = ~xlolims
                leftlo, ylo = xywhere(left, y, xlolims)
                caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
            else:
                caplines.extend( self.plot(left, y, 'k|', **plot_kw) )

            if xuplims.any():
                rightup, yup = xywhere(right, y, xuplims)
                caplines.extend(
                    self.plot(rightup,  yup, ls='None',
                              marker=mlines.CARETRIGHT, **plot_kw) )
                xuplims = ~xuplims
                rightup, yup = xywhere(right, y, xuplims)
                caplines.extend( self.plot(rightup,  yup, 'k|', **plot_kw) )
            else:
                caplines.extend( self.plot(right, y, 'k|', **plot_kw) )

    if yerr is not None:
        # A 2xN yerr supplies separate minus (row 0) / plus (row 1) deltas.
        if (iterable(yerr) and len(yerr)==2 and
            iterable(yerr[0]) and iterable(yerr[1])):
            # using list comps rather than arrays to preserve units
            lower  = [thisy-thiserr for (thisy, thiserr)
                      in cbook.safezip(y,yerr[0])]
            upper  = [thisy+thiserr for (thisy, thiserr)
                      in cbook.safezip(y,yerr[1])]
        else:
            # using list comps rather than arrays to preserve units
            lower  = [thisy-thiserr for (thisy, thiserr)
                      in cbook.safezip(y,yerr)]
            upper  = [thisy+thiserr for (thisy, thiserr)
                      in cbook.safezip(y,yerr)]

        barcols.append( self.vlines(x, lower, upper, **lines_kw) )
        if capsize > 0:

            if lolims.any():
                xlo, lowerlo = xywhere(x, lower, lolims)
                # Lower-limit points get a caret instead of a cap.
                caplines.extend(
                    self.plot(xlo, lowerlo, ls='None',
                              marker=mlines.CARETDOWN, **plot_kw) )
                lolims = ~lolims
                xlo, lowerlo = xywhere(x, lower, lolims)
                caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
            else:
                caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )

            if uplims.any():
                xup, upperup = xywhere(x, upper, uplims)

                caplines.extend(
                    self.plot(xup, upperup, ls='None',
                              marker=mlines.CARETUP, **plot_kw) )
                uplims = ~uplims
                xup, upperup = xywhere(x, upper, uplims)
                caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
            else:
                caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )

    # Default placement: data line on top of the error bars.
    if not barsabove and fmt is not None:
        l0, = self.plot(x,y,fmt,**kwargs)

    # Error bars inherit their color from the data line (or the next
    # color in the cycle when there is no data line).
    if ecolor is None:
        if l0 is None:
            ecolor = self._get_lines._get_next_cycle_color()
        else:
            ecolor = l0.get_color()

    for l in barcols:
        l.set_color(ecolor)
    for l in caplines:
        l.set_color(ecolor)

    self.autoscale_view()
    return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
            positions=None, widths=None):
    """
    call signature::

      boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
              positions=None, widths=None)

    Make a box and whisker plot for each column of *x* or each
    vector in sequence *x*.  The box extends from the lower to
    upper quartile values of the data, with a line at the median.
    The whiskers extend from the box to show the range of the
    data.  Flier points are those past the end of the whiskers.

    - *notch* = 0 (default) produces a rectangular box plot.
    - *notch* = 1 will produce a notched box plot

    *sym* (default 'b+') is the default symbol for flier points.
    Enter an empty string ('') if you don't want to show fliers.

    - *vert* = 1 (default) makes the boxes vertical.
    - *vert* = 0 makes horizontal boxes.  This seems goofy, but
      that's how Matlab did it.

    *whis* (default 1.5) defines the length of the whiskers as
    a function of the inner quartile range.  They extend to the
    most extreme data point within ( ``whis*(75%-25%)`` ) data range.

    *positions* (default 1,2,...,n) sets the horizontal positions of
    the boxes. The ticks and limits are automatically set to match
    the positions.

    *widths* is either a scalar or a vector and sets the width of
    each box. The default is 0.5, or ``0.15*(distance between extreme
    positions)`` if that is smaller.

    *x* is an array or a sequence of vectors.

    Returns a dictionary mapping each component of the boxplot
    to a list of the :class:`matplotlib.lines.Line2D`
    instances created.

    **Example:**

    .. plot:: pyplots/boxplot_demo.py
    """
    if not self._hold: self.cla()
    holdStatus = self._hold
    whiskers, caps, boxes, medians, fliers = [], [], [], [], []

    # convert x to a list of vectors (one vector per box)
    if hasattr(x, 'shape'):
        if len(x.shape) == 1:
            if hasattr(x[0], 'shape'):
                x = list(x)
            else:
                x = [x,]
        elif len(x.shape) == 2:
            nr, nc = x.shape
            if nr == 1:
                x = [x]
            elif nc == 1:
                x = [x.ravel()]
            else:
                # one box per column
                x = [x[:,i] for i in xrange(nc)]
        else:
            raise ValueError, "input x can have no more than 2 dimensions"
    if not hasattr(x[0], '__len__'):
        x = [x]
    col = len(x)

    # get some plot info
    if positions is None:
        positions = range(1, col + 1)
    if widths is None:
        distance = max(positions) - min(positions)
        widths = min(0.15*max(distance,1.0), 0.5)
    # broadcast a scalar width to one width per box
    if isinstance(widths, float) or isinstance(widths, int):
        widths = np.ones((col,), float) * widths

    # loop through columns, adding each to plot
    self.hold(True)
    for i,pos in enumerate(positions):
        d = np.ravel(x[i])
        row = len(d)
        # get median and quartiles
        q1, med, q3 = mlab.prctile(d,[25,50,75])
        # get high extreme: largest point within whis*IQR above q3
        iq = q3 - q1
        hi_val = q3 + whis*iq
        wisk_hi = np.compress( d <= hi_val , d )
        if len(wisk_hi) == 0:
            # no data inside the fence; whisker collapses onto the box
            wisk_hi = q3
        else:
            wisk_hi = max(wisk_hi)
        # get low extreme: smallest point within whis*IQR below q1
        lo_val = q1 - whis*iq
        wisk_lo = np.compress( d >= lo_val, d )
        if len(wisk_lo) == 0:
            wisk_lo = q1
        else:
            wisk_lo = min(wisk_lo)
        # get fliers - if we are showing them
        flier_hi = []
        flier_lo = []
        flier_hi_x = []
        flier_lo_x = []
        if len(sym) != 0:
            flier_hi = np.compress( d > wisk_hi, d )
            flier_lo = np.compress( d < wisk_lo, d )
            flier_hi_x = np.ones(flier_hi.shape[0]) * pos
            flier_lo_x = np.ones(flier_lo.shape[0]) * pos
        # get x locations for fliers, whisker, whisker cap and box sides
        box_x_min = pos - widths[i] * 0.5
        box_x_max = pos + widths[i] * 0.5

        wisk_x = np.ones(2) * pos

        cap_x_min = pos - widths[i] * 0.25
        cap_x_max = pos + widths[i] * 0.25
        cap_x = [cap_x_min, cap_x_max]

        # get y location for median
        med_y = [med, med]

        # calculate 'regular' plot
        if notch == 0:
            # make our box vectors (closed rectangle outline)
            box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
            box_y = [q1, q1, q3, q3, q1 ]
            # make our median line vectors
            med_x = [box_x_min, box_x_max]
        # calculate 'notch' plot
        else:
            # 1.57*IQR/sqrt(n) gives the conventional median
            # confidence-interval notch half-height; clamp to the box.
            notch_max = med + 1.57*iq/np.sqrt(row)
            notch_min = med - 1.57*iq/np.sqrt(row)
            if notch_max > q3:
                notch_max = q3
            if notch_min < q1:
                notch_min = q1
            # make our notched box vectors
            box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
                     box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
                     box_x_min ]
            box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
                     med, notch_min, q1]
            # make our median line vectors
            med_x = [cap_x_min, cap_x_max]
            med_y = [med, med]

        # vertical or horizontal plot?
        if vert:
            def doplot(*args):
                return self.plot(*args)
        else:
            # swap every (x, y) pair in the (x, y, fmt) triples so the
            # same geometry is drawn sideways
            def doplot(*args):
                shuffled = []
                for i in xrange(0, len(args), 3):
                    shuffled.extend([args[i+1], args[i], args[i+2]])
                return self.plot(*shuffled)

        whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
                               wisk_x, [q3, wisk_hi], 'b--'))
        caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
                           cap_x, [wisk_lo, wisk_lo], 'k-'))
        boxes.extend(doplot(box_x, box_y, 'b-'))
        medians.extend(doplot(med_x, med_y, 'r-'))
        fliers.extend(doplot(flier_hi_x, flier_hi, sym,
                             flier_lo_x, flier_lo, sym))

    # fix our axes/ticks up a little
    if 1 == vert:
        setticks, setlim = self.set_xticks, self.set_xlim
    else:
        setticks, setlim = self.set_yticks, self.set_ylim

    newlimits = min(positions)-0.5, max(positions)+0.5
    setlim(newlimits)
    setticks(positions)

    # reset hold status
    self.hold(holdStatus)

    return dict(whiskers=whiskers, caps=caps, boxes=boxes,
                medians=medians, fliers=fliers)
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
            vmin=None, vmax=None, alpha=1.0, linewidths=None,
            faceted=True, verts=None,
            **kwargs):
    """
    call signatures::

      scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
              vmin=None, vmax=None, alpha=1.0, linewidths=None,
              verts=None, **kwargs)

    Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
    sequences of the same length, *N*.

    Keyword arguments:

      *s*:
        size in points^2.  It is a scalar or an array of the same
        length as *x* and *y*.

      *c*:
        a color. *c* can be a single color format string, or a
        sequence of color specifications of length *N*, or a
        sequence of *N* numbers to be mapped to colors using the
        *cmap* and *norm* specified via kwargs (see below). Note
        that *c* should not be a single numeric RGB or RGBA
        sequence because that is indistinguishable from an array
        of values to be colormapped.  *c* can be a 2-D array in
        which the rows are RGB or RGBA, however.

      *marker*:
        can be one of:

        =====   ==============
        Value   Description
        =====   ==============
        's'     square
        'o'     circle
        '^'     triangle up
        '>'     triangle right
        'v'     triangle down
        '<'     triangle left
        'd'     diamond
        'p'     pentagram
        'h'     hexagon
        '8'     octagon
        '+'     plus
        'x'     cross
        =====   ==============

        The marker can also be a tuple (*numsides*, *style*,
        *angle*), which will create a custom, regular symbol.

          *numsides*:
            the number of sides

          *style*:
            the style of the regular symbol:

            =====   =============================================
            Value   Description
            =====   =============================================
            0       a regular polygon
            1       a star-like symbol
            2       an asterisk
            3       a circle (*numsides* and *angle* is ignored)
            =====   =============================================

          *angle*:
            the angle of rotation of the symbol

        Finally, *marker* can be (*verts*, 0): *verts* is a
        sequence of (*x*, *y*) vertices for a custom scatter
        symbol.  Alternatively, use the kwarg combination
        *marker* = *None*, *verts* = *verts*.

    Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
    which case all masks will be combined and only unmasked points
    will be plotted.

    Other keyword arguments: the color mapping and normalization
    arguments will be used only if *c* is an array of floats.

      *cmap*: [ None | Colormap ]
        A :class:`matplotlib.colors.Colormap` instance. If *None*,
        defaults to rc ``image.cmap``. *cmap* is only used if *c*
        is an array of floats.

      *norm*: [ None | Normalize ]
        A :class:`matplotlib.colors.Normalize` instance is used to
        scale luminance data to 0, 1. If *None*, use the default
        :func:`normalize`. *norm* is only used if *c* is an array
        of floats.

      *vmin*/*vmax*:
        *vmin* and *vmax* are used in conjunction with norm to
        normalize luminance data.  If either are None, the min and
        max of the color array *C* is used.  Note if you pass a
        *norm* instance, your settings for *vmin* and *vmax* will
        be ignored.

      *alpha*: 0 <= scalar <= 1
        The alpha value for the patches

      *linewidths*: [ None | scalar | sequence ]
        If *None*, defaults to (lines.linewidth,).  Note that this
        is a tuple, and if you set the linewidths argument you
        must set it as a sequence of floats, as required by
        :class:`~matplotlib.collections.RegularPolyCollection`.

    Optional kwargs control the
    :class:`~matplotlib.collections.Collection` properties; in
    particular:

      *edgecolors*:
        'none' to plot faces with no outlines

      *facecolors*:
        'none' to plot unfilled outlines

    Here are the standard descriptions of all the
    :class:`~matplotlib.collections.Collection` kwargs:

    %(Collection)s

    A :class:`~matplotlib.collections.Collection` instance is
    returned.
    """
    if not self._hold: self.cla()

    syms =  { # a dict from symbol to (numsides, angle)
        's' : (4,math.pi/4.0,0),   # square
        'o' : (20,3,0),            # circle
        '^' : (3,0,0),             # triangle up
        '>' : (3,math.pi/2.0,0),   # triangle right
        'v' : (3,math.pi,0),       # triangle down
        '<' : (3,3*math.pi/2.0,0), # triangle left
        'd' : (4,0,0),             # diamond
        'p' : (5,0,0),             # pentagram
        'h' : (6,0,0),             # hexagon
        '8' : (8,0,0),             # octagon
        '+' : (4,0,2),             # plus
        'x' : (4,math.pi/4.0,2)    # cross
        }

    self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)

    # Combine the masks of all masked inputs and keep unmasked points.
    x, y, s, c = cbook.delete_masked_points(x, y, s, c)

    if is_string_like(c) or cbook.is_sequence_of_strings(c):
        colors = mcolors.colorConverter.to_rgba_array(c, alpha)
    else:
        sh = np.shape(c)
        # The inherent ambiguity is resolved in favor of color
        # mapping, not interpretation as rgb or rgba:
        if len(sh) == 1 and sh[0] == len(x):
            colors = None  # use cmap, norm after collection is created
        else:
            colors = mcolors.colorConverter.to_rgba_array(c, alpha)

    if not iterable(s):
        scales = (s,)
    else:
        scales = s

    if faceted:
        edgecolors = None
    else:
        edgecolors = 'none'
        warnings.warn(
            '''replace "faceted=False" with "edgecolors='none'"''',
            DeprecationWarning)   #2008/04/18

    sym = None
    symstyle = 0

    # to be API compatible
    if marker is None and not (verts is None):
        marker = (verts, 0)
        verts = None

    if is_string_like(marker):
        # the standard way to define symbols using a string character
        sym = syms.get(marker)
        if sym is None and verts is None:
            raise ValueError('Unknown marker symbol to scatter')
        if sym is not None:
            # Unpack the tuple fetched above rather than indexing syms
            # again: the old ``syms[marker]`` raised a spurious KeyError
            # when an unknown marker string was combined with explicit
            # *verts*, making the verts fallback below unreachable.
            numsides, rotation, symstyle = sym

    elif iterable(marker):
        # accept marker to be:
        #    (numsides, style, [angle])
        # or
        #    (verts[], style, [angle])

        if len(marker)<2 or len(marker)>3:
            raise ValueError('Cannot create markersymbol from marker')

        if cbook.is_numlike(marker[0]):
            # (numsides, style, [angle])

            if len(marker)==2:
                numsides, rotation = marker[0], 0.
            elif len(marker)==3:
                numsides, rotation = marker[0], marker[2]
            sym = True

            if marker[1] in (1,2):
                symstyle = marker[1]

        else:
            verts = np.asarray(marker[0])

    # Build the appropriate collection for the resolved symbol style.
    if sym is not None:
        if symstyle==0:
            collection = mcoll.RegularPolyCollection(
                numsides, rotation, scales,
                facecolors = colors,
                edgecolors = edgecolors,
                linewidths = linewidths,
                offsets = zip(x,y),
                transOffset = self.transData,
                )
        elif symstyle==1:
            collection = mcoll.StarPolygonCollection(
                numsides, rotation, scales,
                facecolors = colors,
                edgecolors = edgecolors,
                linewidths = linewidths,
                offsets = zip(x,y),
                transOffset = self.transData,
                )
        elif symstyle==2:
            collection = mcoll.AsteriskPolygonCollection(
                numsides, rotation, scales,
                facecolors = colors,
                edgecolors = edgecolors,
                linewidths = linewidths,
                offsets = zip(x,y),
                transOffset = self.transData,
                )
        elif symstyle==3:
            collection = mcoll.CircleCollection(
                scales,
                facecolors = colors,
                edgecolors = edgecolors,
                linewidths = linewidths,
                offsets = zip(x,y),
                transOffset = self.transData,
                )
    else:
        # Custom vertices: normalize them to unit radius first.
        rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
        verts /= rescale

        collection = mcoll.PolyCollection(
            (verts,), scales,
            facecolors = colors,
            edgecolors = edgecolors,
            linewidths = linewidths,
            offsets = zip(x,y),
            transOffset = self.transData,
            )
        collection.set_transform(mtransforms.IdentityTransform())
    collection.set_alpha(alpha)
    collection.update(kwargs)

    if colors is None:
        # c was a 1-D value array: color-map it onto the collection.
        if norm is not None: assert(isinstance(norm, mcolors.Normalize))
        if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
        collection.set_array(np.asarray(c))
        collection.set_cmap(cmap)
        collection.set_norm(norm)

        if vmin is not None or vmax is not None:
            collection.set_clim(vmin, vmax)
        else:
            collection.autoscale_None()

    temp_x = x
    temp_y = y

    minx = np.amin(temp_x)
    maxx = np.amax(temp_x)
    miny = np.amin(temp_y)
    maxy = np.amax(temp_y)

    w = maxx-minx
    h = maxy-miny

    # the pad is a little hack to deal with the fact that we don't
    # want to transform all the symbols whose scales are in points
    # to data coords to get the exact bounding box for efficiency
    # reasons.  It can be done right if this is deemed important
    padx, pady = 0.05*w, 0.05*h
    corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
    self.update_datalim( corners)
    self.autoscale_view()

    # add the collection last
    self.add_collection(collection)
    return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
           xscale = 'linear', yscale = 'linear',
           cmap=None, norm=None, vmin=None, vmax=None,
           alpha=1.0, linewidths=None, edgecolors='none',
           reduce_C_function = np.mean,
           **kwargs):
    """
    call signature::

      hexbin(x, y, C = None, gridsize = 100, bins = None,
             xscale = 'linear', yscale = 'linear',
             cmap=None, norm=None, vmin=None, vmax=None,
             alpha=1.0, linewidths=None, edgecolors='none'
             reduce_C_function = np.mean,
             **kwargs)

    Make a hexagonal binning plot of *x* versus *y*, where *x*,
    *y* are 1-D sequences of the same length, *N*. If *C* is None
    (the default), this is a histogram of the number of occurrences
    of the observations at (x[i],y[i]).

    If *C* is specified, it specifies values at the coordinate
    (x[i],y[i]). These values are accumulated for each hexagonal
    bin and then reduced according to *reduce_C_function*, which
    defaults to numpy's mean function (np.mean). (If *C* is
    specified, it must also be a 1-D sequence of the same length
    as *x* and *y*.)

    *x*, *y* and/or *C* may be masked arrays, in which case only
    unmasked points will be plotted.

    Optional keyword arguments:

      *gridsize*: [ 100 | integer ]
        The number of hexagons in the *x*-direction, default is
        100. The corresponding number of hexagons in the
        *y*-direction is chosen such that the hexagons are
        approximately regular. Alternatively, gridsize can be a
        tuple with two elements specifying the number of hexagons
        in the *x*-direction and the *y*-direction.

      *bins*: [ None | 'log' | integer | sequence ]
        If *None*, no binning is applied; the color of each hexagon
        directly corresponds to its count value.

        If 'log', use a logarithmic scale for the color
        map. Internally, :math:`log_{10}(i+1)` is used to
        determine the hexagon color.

        If an integer, divide the counts in the specified number
        of bins, and color the hexagons accordingly.

        If a sequence of values, the values of the lower bound of
        the bins to be used.

      *xscale*: [ 'linear' | 'log' ]
        Use a linear or log10 scale on the horizontal axis.

      *yscale*: [ 'linear' | 'log' ]
        Use a linear or log10 scale on the vertical axis.

    Other keyword arguments controlling color mapping and normalization
    arguments:

      *cmap*: [ None | Colormap ]
        a :class:`matplotlib.cm.Colormap` instance. If *None*,
        defaults to rc ``image.cmap``.

      *norm*: [ None | Normalize ]
        :class:`matplotlib.colors.Normalize` instance is used to
        scale luminance data to 0,1.

      *vmin*/*vmax*: scalar
        *vmin* and *vmax* are used in conjunction with *norm* to normalize
        luminance data. If either are *None*, the min and max of the color
        array *C* is used. Note if you pass a norm instance, your settings
        for *vmin* and *vmax* will be ignored.

      *alpha*: scalar
        the alpha value for the patches

      *linewidths*: [ None | scalar ]
        If *None*, defaults to rc lines.linewidth. Note that this
        is a tuple, and if you set the linewidths argument you
        must set it as a sequence of floats, as required by
        :class:`~matplotlib.collections.RegularPolyCollection`.

    Other keyword arguments controlling the Collection properties:

      *edgecolors*: [ None | mpl color | color sequence ]
        If 'none', draws the edges in the same color as the fill color.
        This is the default, as it avoids unsightly unpainted pixels
        between the hexagons.

        If *None*, draws the outlines in the default color.

        If a matplotlib color arg or sequence of rgba tuples, draws the
        outlines in the specified color.

    Here are the standard descriptions of all the
    :class:`~matplotlib.collections.Collection` kwargs:

    %(Collection)s

    The return value is a
    :class:`~matplotlib.collections.PolyCollection` instance; use
    :meth:`~matplotlib.collection.PolyCollection.get_array` on
    this :class:`~matplotlib.collections.PolyCollection` to get
    the counts in each hexagon.

    **Example:**

    .. plot:: mpl_examples/pylab_examples/hexbin_demo.py
    """
    if not self._hold: self.cla()

    self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)

    x, y, C = cbook.delete_masked_points(x, y, C)

    # Set the size of the hexagon grid
    if iterable(gridsize):
        nx, ny = gridsize
    else:
        nx = gridsize
        ny = int(nx/math.sqrt(3))
    # Count the number of data in each hexagon
    x = np.array(x, float)
    y = np.array(y, float)
    if xscale=='log':
        x = np.log10(x)
    if yscale=='log':
        y = np.log10(y)
    xmin = np.amin(x)
    xmax = np.amax(x)
    ymin = np.amin(y)
    ymax = np.amax(y)
    # In the x-direction, the hexagons exactly cover the region from
    # xmin to xmax. Need some padding to avoid roundoff errors.
    padding = 1.e-9 * (xmax - xmin)
    xmin -= padding
    xmax += padding
    sx = (xmax-xmin) / nx
    sy = (ymax-ymin) / ny

    # Work in grid units. The hexagon centers form two interleaved
    # rectangular lattices: lattice 1 at integer coordinates
    # ((nx+1) x (ny+1) centers) and lattice 2 offset by half a cell
    # in both directions (nx x ny centers).
    x = (x-xmin)/sx
    y = (y-ymin)/sy
    ix1 = np.round(x).astype(int)
    iy1 = np.round(y).astype(int)
    ix2 = np.floor(x).astype(int)
    iy2 = np.floor(y).astype(int)

    nx1 = nx + 1
    ny1 = ny + 1
    nx2 = nx
    ny2 = ny
    n = nx1*ny1+nx2*ny2
    # Anisotropically-weighted squared distance to the nearest center
    # of each lattice; bdist[i] is True when point i falls in a
    # lattice-1 hexagon.
    d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
    d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
    bdist = (d1<d2)

    if C is None:
        # Pure histogram: count the points falling in each hexagon.
        accum = np.zeros(n)
        # Create appropriate views into "accum" array.
        lattice1 = accum[:nx1*ny1]
        lattice2 = accum[nx1*ny1:]
        lattice1.shape = (nx1,ny1)
        lattice2.shape = (nx2,ny2)

        for i in xrange(len(x)):
            if bdist[i]:
                lattice1[ix1[i], iy1[i]]+=1
            else:
                lattice2[ix2[i], iy2[i]]+=1
    else:
        # create accumulation arrays
        lattice1 = np.empty((nx1,ny1),dtype=object)
        for i in xrange(nx1):
            for j in xrange(ny1):
                lattice1[i,j] = []
        lattice2 = np.empty((nx2,ny2),dtype=object)
        for i in xrange(nx2):
            for j in xrange(ny2):
                lattice2[i,j] = []

        for i in xrange(len(x)):
            if bdist[i]:
                lattice1[ix1[i], iy1[i]].append( C[i] )
            else:
                lattice2[ix2[i], iy2[i]].append( C[i] )

        # Reduce each bin's list of C values with reduce_C_function;
        # empty bins become NaN so they can be dropped below.
        for i in xrange(nx1):
            for j in xrange(ny1):
                vals = lattice1[i,j]
                if len(vals):
                    lattice1[i,j] = reduce_C_function( vals )
                else:
                    lattice1[i,j] = np.nan

        for i in xrange(nx2):
            for j in xrange(ny2):
                vals = lattice2[i,j]
                if len(vals):
                    lattice2[i,j] = reduce_C_function( vals )
                else:
                    lattice2[i,j] = np.nan

        # Flatten both lattices into one accumulation vector; NaN
        # marks the bins that received no data.
        accum = np.hstack((
            lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
        good_idxs = ~np.isnan(accum)

    # Vertices of one template hexagon (data units); each polygon
    # below is this template translated to a lattice center.
    px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
    py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0

    polygons = np.zeros((6, n, 2), float)
    polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
    polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
    polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
    polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5

    if C is not None:
        # remove accumulation bins with no data
        polygons = polygons[:,good_idxs,:]
        accum = accum[good_idxs]

    polygons = np.transpose(polygons, axes=[1,0,2])
    polygons[:,:,0] *= sx
    polygons[:,:,1] *= sy
    polygons[:,:,0] += px
    polygons[:,:,1] += py

    # Undo the log10 transform applied above so the polygons land in
    # the original data space, and switch the axis scale to match.
    if xscale=='log':
        polygons[:,:,0] = 10**(polygons[:,:,0])
        xmin = 10**xmin
        xmax = 10**xmax
        self.set_xscale('log')
    if yscale=='log':
        polygons[:,:,1] = 10**(polygons[:,:,1])
        ymin = 10**ymin
        ymax = 10**ymax
        self.set_yscale('log')

    if edgecolors=='none':
        edgecolors = 'face'
    collection = mcoll.PolyCollection(
        polygons,
        edgecolors = edgecolors,
        linewidths = linewidths,
        transOffset = self.transData,
        )

    # Transform accum if needed
    if bins=='log':
        accum = np.log10(accum+1)
    elif bins!=None:
        if not iterable(bins):
            minimum, maximum = min(accum), max(accum)
            bins-=1 # one less edge than bins
            bins = minimum + (maximum-minimum)*np.arange(bins)/bins
        bins = np.sort(bins)
        accum = bins.searchsorted(accum)

    if norm is not None: assert(isinstance(norm, mcolors.Normalize))
    if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
    collection.set_array(accum)
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    collection.set_alpha(alpha)
    collection.update(kwargs)

    if vmin is not None or vmax is not None:
        collection.set_clim(vmin, vmax)
    else:
        collection.autoscale_None()

    corners = ((xmin, ymin), (xmax, ymax))
    self.update_datalim( corners)
    self.autoscale_view()

    # add the collection last
    self.add_collection(collection)
    return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
def arrow(self, x, y, dx, dy, **kwargs):
    """
    call signature::

      arrow(x, y, dx, dy, **kwargs)

    Draw an arrow on this axes anchored at (*x*, *y*) and extending
    to (*x* + *dx*, *y* + *dy*).

    Optional kwargs control the arrow properties:
    %(FancyArrow)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/arrow_demo.py
    """
    arrow_patch = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
    self.add_artist(arrow_patch)
    return arrow_patch
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
    # Build the key artist and attach it; the full signature is
    # documented by QuiverKey.quiverkey_doc, installed below.
    key = mquiver.QuiverKey(*args, **kw)
    self.add_artist(key)
    return key
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
    if not self._hold:
        self.cla()
    vectors = mquiver.Quiver(self, *args, **kw)
    # Second argument False: the data limits are taken from the arrow
    # locations (vectors.XY) just below, not from the collection itself.
    self.add_collection(vectors, False)
    self.update_datalim(vectors.XY)
    self.autoscale_view()
    return vectors
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
    """
    %(barbs_doc)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/barb_demo.py
    """
    if not self._hold:
        self.cla()
    barb_collection = mquiver.Barbs(self, *args, **kw)
    self.add_collection(barb_collection)
    self.update_datalim(barb_collection.get_offsets())
    self.autoscale_view()
    return barb_collection
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
    'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
    """
    call signature::

      fill(*args, **kwargs)

    Plot filled polygons.  *args* is a variable-length argument
    allowing multiple *x*, *y* pairs, each optionally followed by a
    color format string; see :func:`~matplotlib.pyplot.plot` for
    details on the argument parsing.  For example, to plot a blue
    polygon with vertices at *x*, *y*::

      ax.fill(x,y, 'b' )

    An arbitrary number of *x*, *y*, *color* groups can be
    specified::

      ax.fill(x1, y1, 'g', x2, y2, 'r')

    Returns the list of :class:`~matplotlib.patches.Patch`
    instances that were added.

    The fill format string supports the same color strings as
    :func:`~matplotlib.pyplot.plot`.

    To shade a region between 0 and *y* along *x* (i.e. fill below
    a curve), use :meth:`fill_between` instead.

    The *closed* kwarg will close the polygon when *True* (default).

    kwargs control the Polygon properties:

    %(Polygon)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/fill_demo.py
    """
    if not self._hold:
        self.cla()
    added_patches = []
    for patch in self._get_patches_for_fill(*args, **kwargs):
        self.add_patch(patch)
        added_patches.append(patch)
    self.autoscale_view()
    return added_patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
    """
    call signature::

      fill_between(x, y1, y2=0, where=None, **kwargs)

    Create a :class:`~matplotlib.collections.PolyCollection`
    filling the regions between *y1* and *y2* where
    ``where==True``

      *x*
        an N length np array of the x data

      *y1*
        an N length scalar or np array of the y data

      *y2*
        an N length scalar or np array of the y data

      *where*
         if None, default to fill between everywhere. If not None,
         it is a N length numpy boolean array and the fill will
         only happen over the regions where ``where==True``

      *kwargs*
        keyword args passed on to the :class:`PolyCollection`

    kwargs control the Polygon properties:

    %(PolyCollection)s

    .. plot:: mpl_examples/pylab_examples/fill_between.py
    """
    # Handle united data, such as dates
    self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
    self._process_unit_info(ydata=y2)

    # Convert the arrays so we can work with them
    x = np.asarray(self.convert_xunits(x))
    y1 = np.asarray(self.convert_yunits(y1))
    y2 = np.asarray(self.convert_yunits(y2))

    # Broadcast scalar y1/y2 to full-length arrays.
    if not cbook.iterable(y1):
        y1 = np.ones_like(x)*y1

    if not cbook.iterable(y2):
        y2 = np.ones_like(x)*y2

    if where is None:
        # Use the builtin bool: np.bool is a deprecated alias that
        # has been removed from modern NumPy.
        where = np.ones(len(x), bool)

    where = np.asarray(where)
    assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))

    polys = []
    # One closed polygon per contiguous run of True values in *where*.
    for ind0, ind1 in mlab.contiguous_regions(where):
        xslice = x[ind0:ind1]
        y1slice = y1[ind0:ind1]
        y2slice = y2[ind0:ind1]

        if not len(xslice):
            continue

        N = len(xslice)
        # Builtin float instead of the removed np.float alias.
        X = np.zeros((2*N+2, 2), float)

        # the purpose of the next two lines is for when y2 is a
        # scalar like 0 and we want the fill to go all the way
        # down to 0 even if none of the y1 sample points do
        X[0] = xslice[0], y2slice[0]
        X[N+1] = xslice[-1], y2slice[-1]

        # Walk forward along y1, then backward along y2 to close
        # the polygon.
        X[1:N+1,0] = xslice
        X[1:N+1,1] = y1slice
        X[N+2:,0] = xslice[::-1]
        X[N+2:,1] = y2slice[::-1]

        polys.append(X)

    collection = mcoll.PolyCollection(polys, **kwargs)

    # now update the datalim and autoscale
    XY1 = np.array([x[where], y1[where]]).T
    XY2 = np.array([x[where], y2[where]]).T
    self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
                                     updatex=True, updatey=True)

    self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
                                     updatex=False, updatey=True)
    self.add_collection(collection)
    self.autoscale_view()
    return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
           interpolation=None, alpha=1.0, vmin=None, vmax=None,
           origin=None, extent=None, shape=None, filternorm=1,
           filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
    """
    call signature::

      imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
             alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
             **kwargs)

    Display the image in *X* to current axes. *X* may be a float
    array, a uint8 array or a PIL image. If *X* is an array, *X*
    can have the following shapes:

    * MxN -- luminance (grayscale, float array only)
    * MxNx3 -- RGB (float or uint8 array)
    * MxNx4 -- RGBA (float or uint8 array)

    The value for each component of MxNx3 and MxNx4 float arrays should be
    in the range 0.0 to 1.0; MxN float arrays may be normalised.

    An :class:`matplotlib.image.AxesImage` instance is returned.

    Keyword arguments:

      *cmap*: [ None | Colormap ]
        A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
        If *None*, default to rc ``image.cmap`` value.

        *cmap* is ignored when *X* has RGB(A) information

      *aspect*: [ None | 'auto' | 'equal' | scalar ]
        If 'auto', changes the image aspect ratio to match that of the axes

        If 'equal', and *extent* is *None*, changes the axes
        aspect ratio to match that of the image. If *extent* is
        not *None*, the axes aspect ratio is changed to match that
        of the extent.

        If *None*, default to rc ``image.aspect`` value.

      *interpolation*:
        Acceptable values are *None*, 'nearest', 'bilinear',
        'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
        'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
        'bessel', 'mitchell', 'sinc', 'lanczos',

        If *interpolation* is *None*, default to rc
        ``image.interpolation``. See also the *filternorm* and
        *filterrad* parameters

      *norm*: [ None | Normalize ]
        An :class:`matplotlib.colors.Normalize` instance; if
        *None*, default is ``normalization()``. This scales
        luminance -> 0-1

        *norm* is only used for an MxN float array.

      *vmin*/*vmax*: [ None | scalar ]
        Used to scale a luminance image to 0-1. If either is
        *None*, the min and max of the luminance values will be
        used. Note if *norm* is not *None*, the settings for
        *vmin* and *vmax* will be ignored.

      *alpha*: scalar
        The alpha blending value, between 0 (transparent) and 1 (opaque)

      *origin*: [ None | 'upper' | 'lower' ]
        Place the [0,0] index of the array in the upper left or lower left
        corner of the axes. If *None*, default to rc ``image.origin``.

      *extent*: [ None | scalars (left, right, bottom, top) ]
        Data values of the axes. The default assigns zero-based row,
        column indices to the *x*, *y* centers of the pixels.

      *shape*: [ None | scalars (columns, rows) ]
        For raw buffer images

      *filternorm*:
        A parameter for the antigrain image resize filter. From the
        antigrain documentation, if *filternorm* = 1, the filter normalizes
        integer values and corrects the rounding errors. It doesn't do
        anything with the source floating point values, it corrects only
        integers according to the rule of 1.0 which means that any sum of
        pixel weights must be equal to 1.0. So, the filter function must
        produce a graph of the proper shape.

      *filterrad*:
        The filter radius for filters that have a radius
        parameter, i.e. when interpolation is one of: 'sinc',
        'lanczos' or 'blackman'

    Additional kwargs are :class:`~matplotlib.artist.Artist` properties:

    %(Artist)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/image_demo.py
    """
    if not self._hold: self.cla()

    if norm is not None: assert(isinstance(norm, mcolors.Normalize))
    if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
    if aspect is None: aspect = rcParams['image.aspect']
    self.set_aspect(aspect)
    im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
                   filternorm=filternorm,
                   filterrad=filterrad, resample=resample, **kwargs)

    im.set_data(X)
    im.set_alpha(alpha)
    self._set_artist_props(im)
    # Clip the image to the axes patch.
    im.set_clip_path(self.patch)
    #if norm is None and shape is None:
    #    im.set_clim(vmin, vmax)
    if vmin is not None or vmax is not None:
        im.set_clim(vmin, vmax)
    else:
        im.autoscale_None()
    im.set_url(url)

    xmin, xmax, ymin, ymax = im.get_extent()

    corners = (xmin, ymin), (xmax, ymax)
    self.update_datalim(corners)
    # Only override the current view limits with the image extent
    # when autoscaling is enabled.
    if self._autoscaleon:
        self.set_xlim((xmin, xmax))
        self.set_ylim((ymin, ymax))
    self.images.append(im)

    return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
def _pcolorargs(self, funcname, *args):
    """
    Normalize pcolor/pcolormesh arguments to an (X, Y, C) triple.

    Accepts either ``(C,)`` -- in which case zero-based integer edge
    grids are generated -- or ``(X, Y, C)``, where 1-D (or single
    row/column) *X* and *Y* are expanded into full 2-D grids.

    Raises TypeError for any other argument count or for
    incompatible *X*/*Y* shapes.  *funcname* is only used in the
    error messages.
    """
    if len(args)==1:
        C = args[0]
        numRows, numCols = C.shape
        # One more edge than cells in each direction.
        X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
    elif len(args)==3:
        X, Y, C = args
    else:
        raise TypeError(
            'Illegal arguments to %s; see help(%s)' % (funcname, funcname))

    Nx = X.shape[-1]
    Ny = Y.shape[0]
    # NOTE: these comparisons used the Python-2-only '<>' operator;
    # '!=' is equivalent and valid in both Python 2 and 3.
    if len(X.shape) != 2 or X.shape[0] == 1:
        x = X.reshape(1,Nx)
        X = x.repeat(Ny, axis=0)
    if len(Y.shape) != 2 or Y.shape[1] == 1:
        y = Y.reshape(Ny, 1)
        Y = y.repeat(Nx, axis=1)
    if X.shape != Y.shape:
        raise TypeError(
            'Incompatible X, Y inputs to %s; see help(%s)' % (
            funcname, funcname))
    return X, Y, C
def pcolor(self, *args, **kwargs):
    """
    call signatures::

      pcolor(C, **kwargs)
      pcolor(X, Y, C, **kwargs)

    Create a pseudocolor plot of a 2-D array.

    *C* is the array of color values.

    *X* and *Y*, if given, specify the (*x*, *y*) coordinates of
    the colored quadrilaterals; the quadrilateral for C[i,j] has
    corners at::

      (X[i,   j],   Y[i,   j]),
      (X[i,   j+1], Y[i,   j+1]),
      (X[i+1, j],   Y[i+1, j]),
      (X[i+1, j+1], Y[i+1, j+1]).

    Ideally the dimensions of *X* and *Y* should be one greater
    than those of *C*; if the dimensions are the same, then the
    last row and column of *C* will be ignored.

    Note that the column index corresponds to the
    *x*-coordinate, and the row index corresponds to *y*; for
    details, see the :ref:`Grid Orientation
    <axes-pcolor-grid-orientation>` section below.

    If either or both of *X* and *Y* are 1-D arrays or column vectors,
    they will be expanded as needed into the appropriate 2-D arrays,
    making a rectangular grid.

    *X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
    of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
    [i, j+1],[i+1, j+1]) is masked, nothing is plotted.

    Keyword arguments:

      *cmap*: [ None | Colormap ]
        A :class:`matplotlib.cm.Colormap` instance. If *None*, use
        rc settings.

      *norm*: [ None | Normalize ]
        An :class:`matplotlib.colors.Normalize` instance is used
        to scale luminance data to 0,1. If *None*, defaults to
        :func:`normalize`.

      *vmin*/*vmax*: [ None | scalar ]
        *vmin* and *vmax* are used in conjunction with *norm* to
        normalize luminance data. If either are *None*, the min
        and max of the color array *C* is used. If you pass a
        *norm* instance, *vmin* and *vmax* will be ignored.

      *shading*: [ 'flat' | 'faceted' ]
        If 'faceted', a black grid is drawn around each rectangle; if
        'flat', edges are not drawn. Default is 'flat', contrary to
        Matlab(TM).

        This kwarg is deprecated; please use 'edgecolors' instead:
          * shading='flat' -- edgecolors='None'
          * shading='faceted -- edgecolors='k'

      *edgecolors*: [ None | 'None' | color | color sequence]
        If *None*, the rc setting is used by default.

        If 'None', edges will not be visible.

        An mpl color or sequence of colors will set the edge color

      *alpha*: 0 <= scalar <= 1
        the alpha blending value

    Return value is a :class:`matplotlib.collection.Collection`
    instance.

    .. _axes-pcolor-grid-orientation:

    The grid orientation follows the Matlab(TM) convention: an
    array *C* with shape (*nrows*, *ncolumns*) is plotted with
    the column number as *X* and the row number as *Y*, increasing
    up; hence it is plotted the way the array would be printed,
    except that the *Y* axis is reversed. That is, *C* is taken
    as *C*(*y*, *x*).

    Similarly for :func:`~matplotlib.pyplot.meshgrid`::

      x = np.arange(5)
      y = np.arange(3)
      X, Y = meshgrid(x,y)

    is equivalent to:

      X = array([[0, 1, 2, 3, 4],
                 [0, 1, 2, 3, 4],
                 [0, 1, 2, 3, 4]])

      Y = array([[0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1],
                 [2, 2, 2, 2, 2]])

    so if you have::

      C = rand( len(x), len(y))

    then you need::

      pcolor(X, Y, C.T)

    or::

      pcolor(C.T)

    Matlab :func:`pcolor` always discards the last row and column
    of *C*, but matplotlib displays the last row and column if *X* and
    *Y* are not specified, or if *X* and *Y* have one more row and
    column than *C*.

    kwargs can be used to control the
    :class:`~matplotlib.collection.PolyCollection` properties:

    %(PolyCollection)s
    """
    if not self._hold: self.cla()

    alpha = kwargs.pop('alpha', 1.0)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    shading = kwargs.pop('shading', 'flat')

    X, Y, C = self._pcolorargs('pcolor', *args)
    Ny, Nx = X.shape

    # convert to MA, if necessary.
    C = ma.asarray(C)
    X = ma.asarray(X)
    Y = ma.asarray(Y)
    mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
    # A quad is masked if any of its four corner vertices is masked.
    xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
    # don't plot if C or any of the surrounding vertices are masked.
    mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask

    newaxis = np.newaxis
    compress = np.compress

    # Gather the four corner coordinates of every unmasked quad.
    ravelmask = (mask==0).ravel()
    X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
    Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
    X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
    Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
    X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
    Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
    X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
    Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
    npoly = len(X1)

    # Stitch the corners into closed 5-point polygons (first vertex
    # repeated at the end).
    xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
                         X2[:,newaxis], Y2[:,newaxis],
                         X3[:,newaxis], Y3[:,newaxis],
                         X4[:,newaxis], Y4[:,newaxis],
                         X1[:,newaxis], Y1[:,newaxis]),
                         axis=1)
    verts = xy.reshape((npoly, 5, 2))

    #verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))

    C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())

    if shading == 'faceted':
        # Note the trailing comma: a 1-tuple holding one RGBA color.
        edgecolors = (0,0,0,1),
        linewidths = (0.25,)
    else:
        edgecolors = 'face'
        linewidths = (1.0,)
    kwargs.setdefault('edgecolors', edgecolors)
    kwargs.setdefault('antialiaseds', (0,))
    kwargs.setdefault('linewidths', linewidths)

    collection = mcoll.PolyCollection(verts, **kwargs)

    collection.set_alpha(alpha)
    collection.set_array(C)
    if norm is not None: assert(isinstance(norm, mcolors.Normalize))
    if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    if vmin is not None or vmax is not None:
        collection.set_clim(vmin, vmax)
    else:
        collection.autoscale_None()
    self.grid(False)

    # Data limits from the unmasked vertices only.
    x = X.compressed()
    y = Y.compressed()
    minx = np.amin(x)
    maxx = np.amax(x)
    miny = np.amin(y)
    maxy = np.amax(y)

    corners = (minx, miny), (maxx, maxy)
    self.update_datalim( corners)
    self.autoscale_view()
    self.add_collection(collection)
    return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
def pcolormesh(self, *args, **kwargs):
    """
    call signatures::

      pcolormesh(C)
      pcolormesh(X, Y, C)
      pcolormesh(C, **kwargs)

    *C* may be a masked array, but *X* and *Y* may not. Masked
    array support is implemented via *cmap* and *norm*; in
    contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
    draw quadrilaterals with masked colors or vertices.

    Keyword arguments:

      *cmap*: [ None | Colormap ]
        A :class:`matplotlib.cm.Colormap` instance. If None, use
        rc settings.

      *norm*: [ None | Normalize ]
        A :class:`matplotlib.colors.Normalize` instance is used to
        scale luminance data to 0,1. If None, defaults to
        :func:`normalize`.

      *vmin*/*vmax*: [ None | scalar ]
        *vmin* and *vmax* are used in conjunction with *norm* to
        normalize luminance data. If either are *None*, the min
        and max of the color array *C* is used. If you pass a
        *norm* instance, *vmin* and *vmax* will be ignored.

      *shading*: [ 'flat' | 'faceted' ]
        If 'faceted', a black grid is drawn around each rectangle; if
        'flat', edges are not drawn. Default is 'flat', contrary to
        Matlab(TM).

        This kwarg is deprecated; please use 'edgecolors' instead:
          * shading='flat' -- edgecolors='None'
          * shading='faceted -- edgecolors='k'

      *edgecolors*: [ None | 'None' | color | color sequence]
        If None, the rc setting is used by default.

        If 'None', edges will not be visible.

        An mpl color or sequence of colors will set the edge color

      *alpha*: 0 <= scalar <= 1
        the alpha blending value

    Return value is a :class:`matplotlib.collection.QuadMesh`
    object.

    kwargs can be used to control the
    :class:`matplotlib.collections.QuadMesh`
    properties:

    %(QuadMesh)s

    .. seealso::

        :func:`~matplotlib.pyplot.pcolor`:
            For an explanation of the grid orientation and the
            expansion of 1-D *X* and/or *Y* to 2-D arrays.
    """
    if not self._hold: self.cla()

    alpha = kwargs.pop('alpha', 1.0)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    shading = kwargs.pop('shading', 'flat')
    edgecolors = kwargs.pop('edgecolors', 'None')
    antialiased = kwargs.pop('antialiased', False)

    X, Y, C = self._pcolorargs('pcolormesh', *args)
    Ny, Nx = X.shape

    # convert to one dimensional arrays
    C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
                                    # lower left corner
    X = X.ravel()
    Y = Y.ravel()

    # Interleave the flattened vertex coordinates into an (N, 2)
    # array, which is the layout QuadMesh expects.
    coords = np.zeros(((Nx * Ny), 2), dtype=float)
    coords[:, 0] = X
    coords[:, 1] = Y

    if shading == 'faceted' or edgecolors != 'None':
        showedges = 1
    else:
        showedges = 0

    collection = mcoll.QuadMesh(
        Nx - 1, Ny - 1, coords, showedges,
        antialiased=antialiased)  # kwargs are not used
    collection.set_alpha(alpha)
    collection.set_array(C)
    if norm is not None: assert(isinstance(norm, mcolors.Normalize))
    if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    if vmin is not None or vmax is not None:
        collection.set_clim(vmin, vmax)
    else:
        collection.autoscale_None()

    self.grid(False)

    minx = np.amin(X)
    maxx = np.amax(X)
    miny = np.amin(Y)
    maxy = np.amax(Y)

    corners = (minx, miny), (maxx, maxy)
    self.update_datalim( corners)
    self.autoscale_view()
    self.add_collection(collection)
    return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
def pcolorfast(self, *args, **kwargs):
    """
    pseudocolor plot of a 2-D array

    Experimental; this is a version of pcolor that
    does not draw lines, that provides the fastest
    possible rendering with the Agg backend, and that
    can handle any quadrilateral grid.

    Call signatures::

      pcolor(C, **kwargs)
      pcolor(xr, yr, C, **kwargs)
      pcolor(x, y, C, **kwargs)
      pcolor(X, Y, C, **kwargs)

    C is the 2D array of color values corresponding to quadrilateral
    cells. Let (nr, nc) be its shape.  C may be a masked array.

    ``pcolor(C, **kwargs)`` is equivalent to
    ``pcolor([0,nc], [0,nr], C, **kwargs)``

    *xr*, *yr* specify the ranges of *x* and *y* corresponding to the
    rectangular region bounding *C*.  If::

        xr = [x0, x1]

    and::

        yr = [y0,y1]

    then *x* goes from *x0* to *x1* as the second index of *C* goes
    from 0 to *nc*, etc.  (*x0*, *y0*) is the outermost corner of
    cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
    (*nr*-1, *nc*-1).  All cells are rectangles of the same size.
    This is the fastest version.

    *x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
    giving the x and y boundaries of the cells.  Hence the cells are
    rectangular but the grid may be nonuniform.  The speed is
    intermediate.  (The grid is checked, and if found to be
    uniform the fast version is used.)

    *X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
    the (x,y) coordinates of the corners of the colored
    quadrilaterals; the quadrilateral for C[i,j] has corners at
    (X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
    (X[i+1,j+1],Y[i+1,j+1]).  The cells need not be rectangular.
    This is the most general, but the slowest to render.  It may
    produce faster and more compact output using ps, pdf, and
    svg backends, however.

    Note that the column index corresponds to the x-coordinate,
    and the row index corresponds to y; for details, see
    the "Grid Orientation" section below.

    Optional keyword arguments:

      *cmap*: [ None | Colormap ]
        A cm Colormap instance from cm. If None, use rc settings.

      *norm*: [ None | Normalize ]
        An mcolors.Normalize instance is used to scale luminance data to
        0,1. If None, defaults to normalize()

      *vmin*/*vmax*: [ None | scalar ]
        *vmin* and *vmax* are used in conjunction with norm to normalize
        luminance data.  If either are *None*, the min and max of the color
        array *C* is used.  If you pass a norm instance, *vmin* and *vmax*
        will be *None*.

      *alpha*: 0 <= scalar <= 1
        the alpha blending value

    Return value is an image if a regular or rectangular grid
    is specified, and a QuadMesh collection in the general
    quadrilateral case.
    """
    if not self._hold: self.cla()

    alpha = kwargs.pop('alpha', 1.0)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    if norm is not None: assert(isinstance(norm, mcolors.Normalize))
    if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))

    C = args[-1]
    nr, nc = C.shape
    # Decide the rendering strategy from the grid arguments:
    #   uniform grid          -> "image"       (AxesImage)
    #   1-D nonuniform edges  -> "pcolorimage" (PcolorImage)
    #   2-D corner arrays     -> "quadmesh"    (QuadMesh)
    if len(args) == 1:
        style = "image"
        x = [0, nc]
        y = [0, nr]
    elif len(args) == 3:
        x, y = args[:2]
        x = np.asarray(x)
        y = np.asarray(y)
        if x.ndim == 1 and y.ndim == 1:
            if x.size == 2 and y.size == 2:
                style = "image"
            else:
                dx = np.diff(x)
                dy = np.diff(y)
                # Treat the grid as uniform when the cell spacing
                # varies by less than 1% of its mean.
                if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
                    np.ptp(dy) < 0.01*np.abs(dy.mean())):
                    style = "image"
                else:
                    style = "pcolorimage"
        elif x.ndim == 2 and y.ndim == 2:
            style = "quadmesh"
        else:
            raise TypeError("arguments do not match valid signatures")
    else:
        raise TypeError("need 1 argument or 3 arguments")

    if style == "quadmesh":

        # convert to one dimensional arrays
        # This should also be moved to the QuadMesh class
        C = ma.ravel(C) # data point in each cell is value
                        # at lower left corner
        X = x.ravel()
        Y = y.ravel()
        Nx = nc+1
        Ny = nr+1

        # The following needs to be cleaned up; the renderer
        # requires separate contiguous arrays for X and Y,
        # but the QuadMesh class requires the 2D array.
        coords = np.empty(((Nx * Ny), 2), np.float64)
        coords[:, 0] = X
        coords[:, 1] = Y

        # The QuadMesh class can also be changed to
        # handle relevant superclass kwargs; the initializer
        # should do much more than it does now.
        collection = mcoll.QuadMesh(nc, nr, coords, 0)
        collection.set_alpha(alpha)
        collection.set_array(C)
        collection.set_cmap(cmap)
        collection.set_norm(norm)
        self.add_collection(collection)
        xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
        ret = collection

    else:
        # One of the image styles:
        xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
        if style == "image":

            im = mimage.AxesImage(self, cmap, norm,
                                interpolation='nearest',
                                origin='lower',
                                extent=(xl, xr, yb, yt),
                                 **kwargs)
            im.set_data(C)
            im.set_alpha(alpha)
            self.images.append(im)
            ret = im

        if style == "pcolorimage":
            im = mimage.PcolorImage(self, x, y, C,
                                    cmap=cmap,
                                    norm=norm,
                                    alpha=alpha,
                                    **kwargs)
            self.images.append(im)
            ret = im

    self._set_artist_props(ret)
    if vmin is not None or vmax is not None:
        ret.set_clim(vmin, vmax)
    else:
        ret.autoscale_None()
    self.update_datalim(np.array([[xl, yb], [xr, yt]]))
    self.autoscale_view(tight=True)
    return ret
def contour(self, *args, **kwargs):
    # Unfilled contour lines; contourf is the filled variant.
    if not self._hold:
        self.cla()
    kwargs['filled'] = False
    return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
    # Filled contour regions; contour is the line-only variant.
    if not self._hold:
        self.cla()
    kwargs['filled'] = True
    return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
    # Delegate entirely to the ContourSet, which owns label placement.
    labels = CS.clabel(*args, **kwargs)
    return labels
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
    """
    call signature::

      table(cellText=None, cellColours=None,
            cellLoc='right', colWidths=None,
            rowLabels=None, rowColours=None, rowLoc='left',
            colLabels=None, colColours=None, colLoc='center',
            loc='bottom', bbox=None):

    Add a table to the current axes.  Returns a
    :class:`matplotlib.table.Table` instance.  For finer grained
    control over tables, use the :class:`~matplotlib.table.Table`
    class and add it to the axes with
    :meth:`~matplotlib.axes.Axes.add_table`.

    Thanks to John Gill for providing the class and table.

    kwargs control the :class:`~matplotlib.table.Table`
    properties:

    %(Table)s
    """
    # Delegate construction to matplotlib.table.table, which builds
    # the Table artist and adds it to this axes.
    return mtable.table(self, **kwargs)
# Interpolate the Table property documentation into the docstring.
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
    """
    call signature::

      ax = twinx()

    Create a twin of this Axes that shares its x-axis but has an
    independent y-axis.  The y-axis of *self* keeps its ticks on the
    left; the returned axes puts its y ticks on the right.
    """
    twin = self.figure.add_axes(self.get_position(True),
                                sharex=self, frameon=False)
    twin.yaxis.tick_right()
    twin.yaxis.set_label_position('right')
    self.yaxis.tick_left()
    return twin
def twiny(self):
    """
    call signature::

      ax = twiny()

    Create a twin of this Axes that shares its y-axis but has an
    independent x-axis.  The x-axis of *self* keeps its ticks on the
    bottom; the returned axes puts its x ticks on the top.
    """
    twin = self.figure.add_axes(self.get_position(True),
                                sharey=self, frameon=False)
    twin.xaxis.tick_top()
    twin.xaxis.set_label_position('top')
    self.xaxis.tick_bottom()
    return twin
def get_shared_x_axes(self):
    'Return a copy of the shared axes Grouper object for x axes'
    # NOTE(review): despite the docstring, this returns the Grouper
    # itself, not a copy -- mutating it affects x-axis sharing.
    return self._shared_x_axes
def get_shared_y_axes(self):
    'Return a copy of the shared axes Grouper object for y axes'
    # NOTE(review): despite the docstring, this returns the Grouper
    # itself, not a copy -- mutating it affects y-axis sharing.
    return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
         bottom=None, histtype='bar', align='mid',
         orientation='vertical', rwidth=None, log=False, **kwargs):
    """
    call signature::

      hist(x, bins=10, range=None, normed=False, cumulative=False,
           bottom=None, histtype='bar', align='mid',
           orientation='vertical', rwidth=None, log=False, **kwargs)

    Compute and draw the histogram of *x*.  The return value is a
    tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
    [*patches0*, *patches1*, ...]) if the input contains multiple
    datasets.

    *bins*: an integer number of bins or a sequence of bin edges.
      *x* can be an array, a 2D array with one dataset per column,
      or a list of arrays of different length.  For an integer
      ``bins``, *bins* + 1 edges are returned, matching
      :func:`numpy.histogram` with *new* = True.

    *range*: the lower and upper range of the bins; outliers are
      ignored.  Defaults to (x.min(), x.max()).  Has no effect if
      *bins* is a sequence.  If *bins* is a sequence or *range* is
      given, autoscaling is turned off and the axis limits are set
      to the full bin range.

    *normed*: if *True*, counts are normalized to form a probability
      density, i.e. ``n/(len(x)*dbin)``.

    *cumulative*: if *True*, each bin accumulates the counts of all
      smaller bins (the last bin holds the total).  A value less
      than 0 (e.g. -1) reverses the direction of accumulation.

    *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]

    *align*: [ 'left' | 'mid' | 'right' ] bar placement relative to
      the bin edges.

    *orientation*: [ 'horizontal' | 'vertical' ]

    *rwidth*: relative bar width as a fraction of the bin width;
      ignored for the 'step'/'stepfilled' styles.

    *log*: if *True*, the histogram axis is log scaled; for 1D input
      empty bins are filtered out.

    kwargs are used to update the properties of the hist
    :class:`~matplotlib.patches.Rectangle` instances; only the first
    Rectangle gets a *label* (the rest get '_nolegend_'), so legends
    behave like bar charts:

    %(Rectangle)s
    """
    # NOTE(review): this method uses Python 2-only syntax
    # (``raise E, msg`` statements and ``xrange``); it will not
    # compile under Python 3.
    if not self._hold: self.cla()

    # NOTE: the range keyword overwrites the built-in func range !!!
    #       needs to be fixed in with numpy !!!

    # The old absolute-width kwarg is rejected outright.
    if kwargs.get('width') is not None:
        raise DeprecationWarning(
            'hist now uses the rwidth to give relative width '
            'and not absolute width')

    try:
        # make sure a copy is created: don't use asarray
        x = np.transpose(np.array(x))
        if len(x.shape)==1:
            # Promote a single dataset to a 1 x nsamples 2D layout.
            x.shape = (1,x.shape[0])
        elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
            warnings.warn('2D hist should be nsamples x nvariables; '
                'this looks transposed')
    except ValueError:
        # multiple hist with data of different length: np.array could
        # not build a rectangular array, so keep a list of 1D arrays.
        if iterable(x[0]) and not is_string_like(x[0]):
            tx = []
            for i in xrange(len(x)):
                tx.append( np.array(x[i]) )
            x = tx
        else:
            # NOTE(review): typo 'providet' in this user-facing message
            # (left as-is here; fixing it changes runtime output).
            raise ValueError, 'Can not use providet data to create a histogram'

    # Check whether bins or range are given explicitly. In that
    # case do not autoscale axes.
    binsgiven = (cbook.iterable(bins) or range != None)

    # check the version of the numpy: the `new` keyword to
    # np.histogram existed only in numpy 1.1/1.2.
    if np.__version__ < "1.3": # version 1.1 and 1.2
        hist_kwargs = dict(range=range,
                           normed=bool(normed), new=True)
    else: # version 1.3 and later, drop new=True
        hist_kwargs = dict(range=range,
                           normed=bool(normed))

    n = []
    for i in xrange(len(x)):
        # this will automatically overwrite bins,
        # so that each histogram uses the same bins
        m, bins = np.histogram(x[i], bins, **hist_kwargs)
        n.append(m)

    if cumulative:
        slc = slice(None)
        if cbook.is_numlike(cumulative) and cumulative < 0:
            # Negative cumulative: accumulate from the right.
            slc = slice(None,None,-1)

        if normed:
            n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
        else:
            n = [m[slc].cumsum()[slc] for m in n]

    patches = []

    if histtype.startswith('bar'):
        totwidth = np.diff(bins)
        stacked = False

        # dr is the fraction of each bin occupied by bars.
        if rwidth is not None: dr = min(1., max(0., rwidth))
        elif len(n)>1: dr = 0.8
        else: dr = 1.0

        if histtype=='bar':
            # Side-by-side bars: split the bin among the datasets.
            width = dr*totwidth/len(n)
            dw = width

            if len(n)>1:
                boffset = -0.5*dr*totwidth*(1.-1./len(n))
            else:
                boffset = 0.0
        elif histtype=='barstacked':
            # Stacked bars: full width, accumulated via `bottom`.
            width = dr*totwidth
            boffset, dw = 0.0, 0.0
            stacked = True
        else:
            raise ValueError, 'invalid histtype: %s' % histtype

        if align == 'mid' or align == 'edge':
            boffset += 0.5*totwidth
        elif align == 'right':
            boffset += totwidth
        elif align != 'left' and align != 'center':
            raise ValueError, 'invalid align: %s' % align

        if orientation == 'horizontal':
            for m in n:
                color = self._get_lines._get_next_cycle_color()
                patch = self.barh(bins[:-1]+boffset, m, height=width,
                                  left=bottom, align='center', log=log,
                                  color=color)
                patches.append(patch)
                if stacked:
                    if bottom is None: bottom = 0.0
                    bottom += m
                boffset += dw
        elif orientation == 'vertical':
            for m in n:
                color = self._get_lines._get_next_cycle_color()
                patch = self.bar(bins[:-1]+boffset, m, width=width,
                                 bottom=bottom, align='center', log=log,
                                 color=color)
                patches.append(patch)
                if stacked:
                    if bottom is None: bottom = 0.0
                    bottom += m
                boffset += dw
        else:
            raise ValueError, 'invalid orientation: %s' % orientation

    elif histtype.startswith('step'):
        # Build the step outline as doubled x values so each bin is a
        # horizontal segment; y values are filled in per dataset below.
        x = np.zeros( 2*len(bins), np.float )
        y = np.zeros( 2*len(bins), np.float )

        x[0::2], x[1::2] = bins, bins

        if align == 'left' or align == 'center':
            x -= 0.5*(bins[1]-bins[0])
        elif align == 'right':
            x += 0.5*(bins[1]-bins[0])
        elif align != 'mid' and align != 'edge':
            raise ValueError, 'invalid align: %s' % align

        if log:
            # Tiny positive endpoints so the outline survives log scale.
            y[0],y[-1] = 1e-100, 1e-100
            if orientation == 'horizontal':
                self.set_xscale('log')
            elif orientation == 'vertical':
                self.set_yscale('log')

        fill = False
        if histtype == 'stepfilled':
            fill = True
        elif histtype != 'step':
            raise ValueError, 'invalid histtype: %s' % histtype

        for m in n:
            y[1:-1:2], y[2::2] = m, m
            if orientation == 'horizontal':
                x,y = y,x
            elif orientation != 'vertical':
                raise ValueError, 'invalid orientation: %s' % orientation
            color = self._get_lines._get_next_cycle_color()
            if fill:
                patches.append( self.fill(x, y,
                    closed=False, facecolor=color) )
            else:
                patches.append( self.fill(x, y,
                    closed=False, edgecolor=color, fill=False) )

        # adopted from adjust_x/ylim part of the bar method
        if orientation == 'horizontal':
            xmin, xmax = 0, self.dataLim.intervalx[1]
            for m in n:
                xmin = np.amin(m[m!=0]) # filter out the 0 height bins
            xmin = max(xmin*0.9, 1e-100)
            self.dataLim.intervalx = (xmin, xmax)
        elif orientation == 'vertical':
            ymin, ymax = 0, self.dataLim.intervaly[1]
            for m in n:
                ymin = np.amin(m[m!=0]) # filter out the 0 height bins
            ymin = max(ymin*0.9, 1e-100)
            self.dataLim.intervaly = (ymin, ymax)
        self.autoscale_view()
    else:
        raise ValueError, 'invalid histtype: %s' % histtype

    # Apply user kwargs; only the first patch carries the legend label.
    label = kwargs.pop('label', '')

    for patch in patches:
        for p in patch:
            p.update(kwargs)
            p.set_label(label)
            label = '_nolegend_'

    if binsgiven:
        # Explicit bins/range: pin the bin axis and autoscale the other.
        self.set_autoscale_on(False)
        if orientation == 'vertical':
            self.autoscale_view(scalex=False, scaley=True)
            XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
            self.set_xbound(XL)
        else:
            self.autoscale_view(scalex=True, scaley=False)
            YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
            self.set_ybound(YL)

    if len(n)==1:
        return n[0], bins, cbook.silent_list('Patch', patches[0])
    else:
        return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
        window=mlab.window_hanning, noverlap=0, pad_to=None,
        sides='default', scale_by_freq=None, **kwargs):
    """
    call signature::

      psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
          window=mlab.window_hanning, noverlap=0, pad_to=None,
          sides='default', scale_by_freq=None, **kwargs)

    Plot the power spectral density of *x*, computed with Welch's
    average periodogram method, and return the tuple (*Pxx*,
    *freqs*).  The power is plotted as :math:`10\log_{10}(P_{xx})`
    (decibels), though *Pxx* itself is returned.

    %(PSD)s

    *Fc*: integer
      The center frequency of *x* (defaults to 0), which offsets the
      x extents of the plot for signals acquired and then filtered
      and downsampled to baseband.

    References: Bendat & Piersol -- Random Data: Analysis and
    Measurement Procedures, John Wiley & Sons (1986)

    kwargs control the :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s
    """
    if not self._hold: self.cla()
    power, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap,
                            pad_to, sides, scale_by_freq)
    power.shape = (len(freqs),)
    freqs += Fc

    if scale_by_freq in (None, True):
        psd_units = 'dB/Hz'
    else:
        psd_units = 'dB'

    self.plot(freqs, 10 * np.log10(power), **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Power Spectral Density (%s)' % psd_units)
    self.grid(True)

    # Place y ticks at decade-scaled steps spanning the view limits.
    vmin, vmax = self.viewLim.intervaly
    span = vmax - vmin
    logi = int(np.log10(span))
    if logi == 0:
        logi = .1
    step = 10 * logi
    ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
    self.set_yticks(ticks)

    return power, freqs

psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
        window=mlab.window_hanning, noverlap=0, pad_to=None,
        sides='default', scale_by_freq=None, **kwargs):
    """
    call signature::

      csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
          window=mlab.window_hanning, noverlap=0, pad_to=None,
          sides='default', scale_by_freq=None, **kwargs)

    Plot the cross spectral density :math:`P_{xy}` of *x* and *y*,
    computed by Welch's average periodogram method, and return the
    tuple (*Pxy*, *freqs*).  *Pxy* is complex valued;
    :math:`10\log_{10}|P_{xy}|` is what gets plotted.

    %(PSD)s

    *Fc*: integer
      The center frequency of *x* (defaults to 0), which offsets the
      x extents of the plot.

    References: Bendat & Piersol -- Random Data: Analysis and
    Measurement Procedures, John Wiley & Sons (1986)

    kwargs control the Line2D properties:

    %(Line2D)s

    .. seealso:

        :meth:`psd`
            For a description of the optional parameters.
    """
    if not self._hold: self.cla()
    cross, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
                            pad_to, sides, scale_by_freq)
    # cross is complex; plot its magnitude in dB.
    cross.shape = (len(freqs),)
    freqs += Fc

    self.plot(freqs, 10 * np.log10(np.absolute(cross)), **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Cross Spectrum Magnitude (dB)')
    self.grid(True)

    # Place y ticks at decade-scaled steps spanning the view limits.
    vmin, vmax = self.viewLim.intervaly
    span = vmax - vmin
    step = 10 * int(np.log10(span))
    ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
    self.set_yticks(ticks)

    return cross, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
           window=mlab.window_hanning, noverlap=0, pad_to=None,
           sides='default', scale_by_freq=None, **kwargs):
    """
    call signature::

      cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
             window = mlab.window_hanning, noverlap=0, pad_to=None,
             sides='default', scale_by_freq=None, **kwargs)

    Plot the coherence between *x* and *y*.  Coherence is the
    normalized cross spectral density:

    .. math::

      C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}

    %(PSD)s

    *Fc*: integer
      The center frequency of *x* (defaults to 0), which offsets the
      x extents of the plot.

    The return value is a tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector.

    References: Bendat & Piersol -- Random Data: Analysis and
    Measurement Procedures, John Wiley & Sons (1986)

    kwargs control the :class:`~matplotlib.lines.Line2D`
    properties of the coherence plot:

    %(Line2D)s
    """
    if not self._hold: self.cla()
    # Bug fix: the previous call omitted *pad_to* and *sides*, so the
    # *scale_by_freq* argument was silently passed positionally as
    # *pad_to*.  Forward every declared parameter in signature order.
    cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
                             pad_to, sides, scale_by_freq)
    freqs += Fc

    self.plot(freqs, cxy, **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Coherence')
    self.grid(True)

    return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
             window=mlab.window_hanning, noverlap=128,
             cmap=None, xextent=None, pad_to=None, sides='default',
             scale_by_freq=None):
    """
    call signature::

      specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
               window=mlab.window_hanning, noverlap=128,
               cmap=None, xextent=None, pad_to=None, sides='default',
               scale_by_freq=None)

    Compute and plot a spectrogram of the data in *x*.  The data are
    split into *NFFT* length segments, each segment is windowed with
    *window*, and consecutive segments overlap by *noverlap* points.

    %(PSD)s

    *Fc*: integer
      The center frequency of *x* (defaults to 0), which offsets the
      y extents of the plot.

    *cmap*: a :class:`matplotlib.cm.Colormap` instance; if *None*,
      use the default determined by rc.

    *xextent*: the image extent along the x-axis as (xmin, xmax);
      defaults to (0, max(bins)).

    Return value is (*Pxx*, *freqs*, *bins*, *im*): *bins* are the
    time points the spectrogram is calculated over, *freqs* is an
    array of frequencies, *Pxx* is a len(times) x len(freqs) array of
    power, and *im* is a :class:`matplotlib.image.AxesImage`.

    If *x* is real only the positive spectrum is shown; if complex,
    both sides are shown (overridable with the *sides* keyword).
    """
    if not self._hold: self.cla()

    Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
                                     window, noverlap, pad_to, sides,
                                     scale_by_freq)

    # Power in dB, flipped so low frequencies end up at the bottom.
    Z = np.flipud(10. * np.log10(Pxx))

    if xextent is None:
        xextent = 0, np.amax(bins)
    xmin, xmax = xextent
    freqs += Fc

    im = self.imshow(Z, cmap, extent=(xmin, xmax, freqs[0], freqs[-1]))
    self.axis('auto')

    return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict

del psd_doc_dict #So that this does not become an Axes attribute
def spy(self, Z, precision=0, marker=None, markersize=None,
        aspect='equal', **kwargs):
    """
    call signature::

      spy(Z, precision=0, marker=None, markersize=None,
          aspect='equal', **kwargs)

    Plot the sparsity pattern of the 2-D array *Z*.

    If *precision* is 0, any non-zero value is plotted; otherwise,
    values of :math:`|Z| > precision` are plotted.  For
    :class:`scipy.sparse.spmatrix` instances there is a special case:
    if *precision* is 'present', any value stored in the matrix is
    plotted, even an explicit zero.

    The array is plotted as it would be printed: the first index
    (row) increases downward, the second (column) to the right.  By
    default *aspect* is 'equal' so each element occupies a square;
    pass 'auto' to fill the plot box, or a scalar aspect ratio.

    Two plotting styles are available: image or marker.  Both work
    for full arrays, but only the marker style works for sparse
    matrices.  If *marker* and *markersize* are both *None*, an image
    is returned and remaining kwargs are passed to
    :func:`~matplotlib.pyplot.imshow` (useful kwargs: *cmap*,
    *alpha*); otherwise a :class:`~matplotlib.lines.Line2D` is
    returned and remaining kwargs are passed to
    :meth:`~matplotlib.axes.Axes.plot` (useful kwargs: *marker* --
    's' square (default), 'o' circle, '.' point, ',' pixel --
    *markersize* and *color*).
    """
    if precision is None:
        precision = 0
        # Bug fix: the original merely constructed a DeprecationWarning
        # instance without emitting it (a no-op); actually warn.
        warnings.warn("Use precision=0 instead of None",
                      DeprecationWarning)  # 2008/10/03

    # Sparse input without explicit marker settings falls through to
    # the marker style, since imshow cannot handle sparse matrices.
    if marker is None and markersize is None and hasattr(Z, 'tocoo'):
        marker = 's'
    if marker is None and markersize is None:
        # Image style: draw a boolean mask of the significant entries.
        Z = np.asarray(Z)
        mask = np.absolute(Z)>precision

        if 'cmap' not in kwargs:
            kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
                                                    name='binary')
        nr, nc = Z.shape
        extent = [-0.5, nc-0.5, nr-0.5, -0.5]
        ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
                          extent=extent, origin='upper', **kwargs)
    else:
        # Marker style: collect (row, col) of the significant entries.
        if hasattr(Z, 'tocoo'):
            c = Z.tocoo()
            if precision == 'present':
                y = c.row
                x = c.col
            else:
                nonzero = np.absolute(c.data) > precision
                y = c.row[nonzero]
                x = c.col[nonzero]
        else:
            Z = np.asarray(Z)
            nonzero = np.absolute(Z)>precision
            y, x = np.nonzero(nonzero)
        if marker is None: marker = 's'
        if markersize is None: markersize = 10
        marks = mlines.Line2D(x, y, linestyle='None',
                     marker=marker, markersize=markersize, **kwargs)
        self.add_line(marks)
        nr, nc = Z.shape
        # Invert the y-limits so row 0 is at the top, like print order.
        self.set_xlim(xmin=-0.5, xmax=nc-0.5)
        self.set_ylim(ymin=nr-0.5, ymax=-0.5)
        self.set_aspect(aspect)
        ret = marks
    # Matrix-style decoration: title above, ticks on top, at most
    # nine integer-valued tick marks per axis.
    self.title.set_y(1.05)
    self.xaxis.tick_top()
    self.xaxis.set_ticks_position('both')
    self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
                                             steps=[1, 2, 5, 10],
                                             integer=True))
    self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
                                             steps=[1, 2, 5, 10],
                                             integer=True))
    return ret
def matshow(self, Z, **kwargs):
    '''
    Plot a matrix or array as an image.

    The matrix is shown the way it would be printed: first row at the
    top, with zero-based row and column numbering.  *Z* is anything
    that can be interpreted as a 2-D array; all kwargs are passed on
    to :meth:`~matplotlib.axes.Axes.imshow`.  Defaults are set for
    *extent*, *origin*, *interpolation* and *aspect*; take care when
    overriding *extent* and *origin*, because they interact.  (If you
    want to change them, you probably should be using imshow directly
    in your own version of matshow.)

    Returns: an :class:`matplotlib.image.AxesImage` instance.
    '''
    arr = np.asarray(Z)
    nrows, ncols = arr.shape
    settings = {'extent': [-0.5, ncols-0.5, nrows-0.5, -0.5],
                'origin': 'upper',
                'interpolation': 'nearest',
                'aspect': 'equal'}     # (already the imshow default)
    settings.update(kwargs)
    image = self.imshow(arr, **settings)

    # Matrix-style decoration: title above, ticks on top, at most
    # nine integer-valued tick marks per axis.
    self.title.set_y(1.05)
    self.xaxis.tick_top()
    self.xaxis.set_ticks_position('both')
    self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
                                                     steps=[1, 2, 5, 10],
                                                     integer=True))
    self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
                                                     steps=[1, 2, 5, 10],
                                                     integer=True))
    return image
class SubplotBase:
    """
    Base class for subplots, which are :class:`Axes` instances with
    additional methods to facilitate generating and manipulating a set
    of :class:`Axes` within a figure.
    """

    def __init__(self, fig, *args, **kwargs):
        """
        *fig* is a :class:`matplotlib.figure.Figure` instance.

        *args* is the tuple (*numRows*, *numCols*, *plotNum*), where
        the array of subplots in the figure has dimensions *numRows*,
        *numCols*, and where *plotNum* is the number of the subplot
        being created.  *plotNum* starts at 1 in the upper left
        corner and increases to the right.

        If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
        decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
        """

        self.figure = fig

        if len(args)==1:
            # Compact form, e.g. 221 meaning (2, 2, 1).
            s = str(args[0])
            if len(s) != 3:
                raise ValueError('Argument to subplot must be a 3 digits long')
            rows, cols, num = map(int, s)
        elif len(args)==3:
            rows, cols, num = args
        else:
            raise ValueError('Illegal argument to subplot')

        total = rows*cols
        num -= 1    # convert from matlab to python indexing
                    # ie num in range(0,total)
        if num >= total:
            raise ValueError('Subplot number exceeds total subplots')
        self._rows = rows
        self._cols = cols
        self._num = num

        self.update_params()

        # _axes_class is set in the subplot_class_factory
        self._axes_class.__init__(self, fig, self.figbox, **kwargs)

    def get_geometry(self):
        'get the subplot geometry, eg 2,2,3'
        return self._rows, self._cols, self._num+1

    # COVERAGE NOTE: Never used internally or from examples
    def change_geometry(self, numrows, numcols, num):
        'change subplot geometry, eg. from 1,1,1 to 2,2,3'
        self._rows = numrows
        self._cols = numcols
        self._num = num-1
        self.update_params()
        self.set_position(self.figbox)

    def update_params(self):
        'update the subplot position from fig.subplotpars'

        rows = self._rows
        cols = self._cols
        num = self._num

        pars = self.figure.subplotpars
        left = pars.left
        right = pars.right
        bottom = pars.bottom
        top = pars.top
        wspace = pars.wspace
        hspace = pars.hspace
        totWidth = right-left
        totHeight = top-bottom

        # Height/width of a single subplot cell and of the gaps
        # between cells, expressed in figure-relative coordinates.
        figH = totHeight/(rows + hspace*(rows-1))
        sepH = hspace*figH

        figW = totWidth/(cols + wspace*(cols-1))
        sepW = wspace*figW

        rowNum, colNum = divmod(num, cols)

        figBottom = top - (rowNum+1)*figH - rowNum*sepH
        figLeft = left + colNum*(figW + sepW)

        self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
                                                   figW, figH)
        self.rowNum = rowNum
        self.colNum = colNum
        self.numRows = rows
        self.numCols = cols
        # NOTE: a dead ``if 0:`` debug block of Python 2 print
        # statements was removed here; it referenced attributes
        # (self.figBottom, self.figLeft, ...) that are never set.

    def is_first_col(self):
        return self.colNum==0

    def is_first_row(self):
        return self.rowNum==0

    def is_last_row(self):
        return self.rowNum==self.numRows-1

    def is_last_col(self):
        return self.colNum==self.numCols-1

    # COVERAGE NOTE: Never used internally or from examples
    def label_outer(self):
        """
        set the visible property on ticklabels so xticklabels are
        visible only if the subplot is in the last row and yticklabels
        are visible only if the subplot is in the first column
        """
        lastrow = self.is_last_row()
        firstcol = self.is_first_col()
        for label in self.get_xticklabels():
            label.set_visible(lastrow)

        for label in self.get_yticklabels():
            label.set_visible(firstcol)
# Cache of dynamically created Subplot classes, keyed by Axes subclass.
_subplot_classes = {}

def subplot_class_factory(axes_class=None):
    """
    Return a Subplot class deriving from SubplotBase and *axes_class*
    (assumed to be a subclass of Axes; defaults to Axes).

    Rather than defining a Subplot subclass for every type of Axes,
    the class is created on the fly the first time it is requested and
    cached for reuse.
    """
    if axes_class is None:
        axes_class = Axes

    new_class = _subplot_classes.get(axes_class)
    if new_class is None:
        # Use the three-argument type() constructor instead of the
        # deprecated `new.classobj` (the `new` module was removed in
        # Python 3; type() is equivalent here).
        new_class = type("%sSubplot" % (axes_class.__name__),
                         (SubplotBase, axes_class),
                         {'_axes_class': axes_class})
        _subplot_classes[axes_class] = new_class

    return new_class

# This is provided for backward compatibility
Subplot = subplot_class_factory()

martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| agpl-3.0 |
QuLogic/cartopy | examples/miscellanea/axes_grid_basic.py | 6 | 2559 | """
Using Cartopy and AxesGrid toolkit
----------------------------------
This example demonstrates how to use cartopy `GeoAxes` with
`AxesGrid` from the `mpl_toolkits.axes_grid1`.
The script constructs an `axes_class` kwarg with Plate Carree projection
and passes it to the `AxesGrid` instance. The `AxesGrid` built-in
labelling is switched off, and instead a standard procedure
of creating grid lines is used. Then some fake data is plotted.
"""
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
import numpy as np
def sample_data_3d(shape):
    """Return `lons`, `lats`, `times` and fake `data`"""
    ntimes, nlats, nlons = shape
    lat_1d = np.linspace(-np.pi / 2, np.pi / 2, nlats)
    lon_1d = np.linspace(0, 2 * np.pi, nlons)
    lons, lats = np.meshgrid(lon_1d, lat_1d)

    # Spatial pattern: a zonal wave riding on a latitudinal mean state.
    wave = 0.75 * np.sin(2 * lats) ** 8 * np.cos(4 * lons)
    mean = 0.5 * np.cos(2 * lats) * (np.sin(2 * lats) ** 2 + 2)
    base = wave + mean

    lats = np.rad2deg(lats)
    lons = np.rad2deg(lons)

    # Replicate the 2-D field along a leading time axis, scaling each
    # slice by its time value in [-1, 1].
    times = np.linspace(-1, 1, ntimes)
    data = times[:, np.newaxis, np.newaxis] * base[np.newaxis, ...]
    return lons, lats, times, data
def main():
    projection = ccrs.PlateCarree()
    # AxesGrid takes the axes class plus its constructor kwargs.
    geoaxes_class = (GeoAxes, dict(map_projection=projection))

    lons, lats, times, data = sample_data_3d((6, 73, 145))

    fig = plt.figure()
    grid = AxesGrid(fig, 111, axes_class=geoaxes_class,
                    nrows_ncols=(3, 2),
                    axes_pad=0.6,
                    cbar_location='right',
                    cbar_mode='single',
                    cbar_pad=0.2,
                    cbar_size='3%',
                    label_mode='')  # empty label_mode disables AxesGrid labels

    for i, ax in enumerate(grid):
        ax.coastlines()
        ax.set_xticks(np.linspace(-180, 180, 5), crs=projection)
        ax.set_yticks(np.linspace(-90, 90, 5), crs=projection)
        # A fresh formatter per axes, since formatters bind to an axis.
        ax.xaxis.set_major_formatter(
            LongitudeFormatter(zero_direction_label=True))
        ax.yaxis.set_major_formatter(LatitudeFormatter())

        filled = ax.contourf(lons, lats, data[i, ...],
                             transform=projection,
                             cmap='RdBu')

    # One shared colorbar, driven by the last contour set.
    grid.cbar_axes[0].colorbar(filled)

    plt.show()


if __name__ == '__main__':
    main()
| lgpl-3.0 |
rupak0577/ginga | ginga/mplw/ipg.py | 5 | 1749 |
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas
from ginga.misc import log
from ginga.AstroImage import AstroImage
from ginga import cmap
# add matplotlib colormaps to ginga's own set
cmap.add_matplotlib_cmaps()
## from IPython.display import Image
## from io import BytesIO
class CustomMplViewer(ImageViewCanvas):
    """Ginga image viewer rendered on a matplotlib figure, with helpers
    for notebook display, file loading and drawable overlay canvases.
    """

    def get_nb_image(self):
        # Render the current view as a PNG wrapped for notebook display.
        # NOTE(review): `Image` is undefined in this module -- the
        # `from IPython.display import Image` import at module level is
        # commented out, so calling this raises NameError unless it is
        # restored.
        return Image(data=bytes(self.get_rgb_image_as_bytes(format='png')),
                     format='png', embed=True)

    def load(self, filepath):
        # Load an image from disk into an AstroImage and display it.
        image = AstroImage(logger=self.logger)
        image.load_file(filepath)
        self.set_image(image)

    def show(self):
        # Display the backing matplotlib figure.
        self.figure.show()

    def add_canvas(self, tag=None):
        # add a canvas to the view
        DrawingCanvas = self.getDrawClass('drawingcanvas')
        canvas = DrawingCanvas()
        # enable drawing on the canvas
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.ui_setActive(True)
        canvas.setSurface(self)
        # add the canvas to the view.
        self.add(canvas, tag=tag)
        return canvas
def get_viewer():
    """Create and return a fully configured CustomMplViewer."""
    # Flip to True to get diagnostic logging output.
    use_logger = False
    logger = log.get_logger(null=not use_logger, log_stderr=True)

    # A plain matplotlib figure serves as the rendering surface.
    figure = plt.figure()

    # Build the ginga viewer, set display defaults, attach the figure.
    viewer = CustomMplViewer(logger)
    viewer.enable_autocuts('on')
    viewer.set_autocut_params('zscale')
    viewer.set_figure(figure)

    # Turn on every interactive ginga binding.
    viewer.get_bindings().enable_all(True)

    return viewer
| bsd-3-clause |
hmendozap/auto-sklearn | autosklearn/pipeline/components/classification/gaussian_nb.py | 1 | 3040 | import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from autosklearn.pipeline.components.base import AutoSklearnClassificationAlgorithm
from autosklearn.pipeline.constants import *
class GaussianNB(AutoSklearnClassificationAlgorithm):
    """Gaussian Naive Bayes wrapper supporting iterative (chunked) fitting."""

    def __init__(self, random_state=None, verbose=0):
        # random_state is unused by GaussianNB itself; kept for API symmetry
        self.random_state = random_state
        self.verbose = int(verbose)
        # Lazily created on first (iterative_)fit call
        self.estimator = None

    def fit(self, X, y):
        # Drive iterative_fit one step at a time until all data is consumed.
        while not self.configuration_fully_fitted():
            self.iterative_fit(X, y, n_iter=1)
        return self

    def iterative_fit(self, X, y, n_iter=1, refit=False):
        """Feed up to ``n_iter`` chunks of 1000 samples via partial_fit.

        ``refit=True`` discards any existing estimator and starts over.
        """
        import sklearn.naive_bayes

        if refit:
            self.estimator = None

        if self.estimator is None:
            self.n_iter = 0
            self.fully_fit_ = False
            self.estimator = sklearn.naive_bayes.GaussianNB()
            self.classes_ = np.unique(y.astype(int))

        # Fallback for multilabel classification
        if len(y.shape) > 1 and y.shape[1] > 1:
            import sklearn.multiclass

            # Multilabel targets cannot be partial_fit here: wrap in
            # one-vs-rest and fit on the full data in a single shot.
            self.estimator.n_iter = self.n_iter
            self.estimator = sklearn.multiclass.OneVsRestClassifier(
                self.estimator, n_jobs=1)
            self.estimator.fit(X, y)
            self.fully_fit_ = True
        else:
            for iter in range(n_iter):
                # Each "iteration" consumes the next 1000-sample window.
                start = min(self.n_iter * 1000, y.shape[0])
                stop = min((self.n_iter + 1) * 1000, y.shape[0])
                # Fit two iterations have passed, no data is left
                if X[start:stop].shape[0] == 0:
                    self.fully_fit_ = True
                    break
                self.estimator.partial_fit(X[start:stop], y[start:stop],
                                           self.classes_)
                self.n_iter += 1
                if stop >= len(y):
                    self.fully_fit_ = True
                    break

        return self

    def configuration_fully_fitted(self):
        # True once iterative_fit has consumed the whole training set.
        if self.estimator is None:
            return False
        elif not hasattr(self, 'fully_fit_'):
            return False
        else:
            return self.fully_fit_

    def predict(self, X):
        if self.estimator is None:
            raise NotImplementedError
        return self.estimator.predict(X)

    def predict_proba(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        return self.estimator.predict_proba(X)

    @staticmethod
    def get_properties(dataset_properties=None):
        # Static capability metadata consumed by the auto-sklearn framework.
        return {'shortname': 'GaussianNB',
                'name': 'Gaussian Naive Bayes classifier',
                'handles_regression': False,
                'handles_classification': True,
                'handles_multiclass': True,
                'handles_multilabel': True,
                'is_deterministic': True,
                'input': (DENSE, UNSIGNED_DATA),
                'output': (PREDICTIONS,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        # GaussianNB has no tunable hyperparameters: empty search space.
        cs = ConfigurationSpace()
        return cs
| bsd-3-clause |
WindfallLabs/ArcHacks | _core.py | 1 | 32818 | # -*- coding: utf-8 -*-
"""
Misc ArcPy Addons
Author: Garin Wally
License: MIT
"""
import os
import glob
import random
import re
import sys
#from time import sleep
from queue import Queue
from subprocess import Popen, PIPE
from collections import OrderedDict
from xml.dom import minidom as DOM
from ConfigParser import RawConfigParser
import pandas as pd
import numpy as np
import ogr
import arcpy
#from archacks import DIR
DIR = os.path.abspath(os.path.dirname(__file__))
NEW_GROUP_LAYER = os.path.join(DIR, "NewGroupLayer.lyr")
# Map Python scalar type names to the ArcGIS field types they may fill
# (used by fill_na to validate the replacement value against field types).
type_map = {
    "int": ["Double", "Integer", "ShortInteger"],
    "long": ["Float"],
    "str": ["Text", "String"]}
# TODO: not the best...
def is_active(exe="arcmap"):
    """Return True if the current interpreter is hosted by ``exe``.

    Detects e.g. ArcMap's embedded Python window by checking whether
    ``<exe>.exe`` appears (case-insensitively) in the interpreter path.

    Args:
        exe (str): executable base name to look for (default "arcmap")
    """
    # Escape the name and the dot so "arcmap.exe" does not also match
    # e.g. "arcmapXexe" (the original pattern used an unescaped ".").
    regex = r"(?i){}\.exe".format(re.escape(exe))
    return bool(re.search(regex, sys.executable.replace("\\", "/")))
# MapDocument() cannot be called from within classes and must be global
if is_active():
    # Running inside ArcMap: bind the live session's map document.
    MXD = arcpy.mapping.MapDocument("CURRENT")
else:
    # Outside ArcMap there is no "CURRENT" document available.
    MXD = None
class GDB(object):
    def __init__(self, gdb_path, srid=0, datasets=[], default_queue_ds=""):
        """Geodatabase Object.

        Args:
            gdb_path (str): path to new/existing Geodatabase
            srid (int): Spatial Reference ID to use for datasets only
            datasets (list): dataset names to create using SRID
            default_queue_ds (str): dataset name to use as default for queue
        """
        # NOTE(review): the mutable default for ``datasets`` is shared
        # across calls; safe only while callers never mutate it in place.
        self.path = gdb_path
        if not self.path.endswith(".gdb"):
            raise AttributeError("Not a Geodatabase")
        self.parent_folder = os.path.dirname(self.path)
        self.name = os.path.basename(self.path)
        self.srid = srid
        self.sr = arcpy.SpatialReference(self.srid)
        self.datasets = datasets
        self.default_queue_ds = default_queue_ds
        # Feature-class paths staged for import by build()/load_queued_data()
        self.data_queue = Queue()

    def build(self):
        """Builds gdb, creates datasets, adds queued data."""
        if not os.path.exists(self.path):
            arcpy.CreateFileGDB_management(self.parent_folder, self.name)
            arcpy.RefreshCatalog(self.path)
        arcpy.env.workspace = self.path
        for ds in self.datasets:
            arcpy.CreateFeatureDataset_management(self.path, ds, self.srid)
            arcpy.RefreshCatalog(os.path.join(self.path, ds))
        # NOTE(review): a Queue instance is always truthy, so this branch
        # always runs; harmless since load_queued_data no-ops when empty.
        if self.data_queue:
            self.load_queued_data()
        return

    def add(self, in_data_path, data_name="", dataset=""):
        """Adds input featureclass to geodatabase.

        Args:
            in_data_path (str): path to input data
            data_name (str): optionally rename entered data
            dataset (str): dataset to send imported data
        """
        if not data_name:
            data_name = os.path.basename(in_data_path)
        # Strip SDE owner prefixes ("owner.name") or file extensions.
        if "sde" in data_name.lower():
            data_name = data_name.split(".")[-1]
        elif "." in data_name:
            data_name = data_name.split(".")[0]
        out = os.path.join(self.path, dataset).strip("\\").strip("/")
        arcpy.FeatureClassToFeatureClass_conversion(
            in_data_path, out, data_name)
        # Easily access data paths by fc name
        setattr(self, data_name.lower(),
                os.path.join(self.path, dataset, data_name))
        return

    def add_many(self, data_mapping={}, data_list=[], dataset=""):
        """Adds a list or dict of input feature classes.

        Args:
            data_mapping (dict): dictionary of {data_name: data_path}
            data_list (list): list of data paths to import
            dataset (str): destination dataset for imported data
        """
        # NOTE(review): the ``dataset`` arg is only honored for data_list;
        # data_mapping entries go to the gdb root -- confirm intent.
        if data_mapping:
            for k, v in data_mapping.items():
                self.add(v, k)
        if data_list:
            for fc_path in data_list:
                self.add(fc_path, dataset=dataset)
        return

    def load_queued_data(self):
        """Alias of 'add_many' for importing all data in the data_queue."""
        # Remove path from queue
        while self.data_queue.qsize() > 0:
            self.add(self.data_queue.get(), "", dataset=self.default_queue_ds)
        return
# Debilitatingly slow
'''
def add_table(self, table_path, table_name="", where=""):
if not table_name:
table_name = os.path.basename(table_path)
if "sde" in table_name.lower():
table_name = table_name.split(".")[-1]
elif "." in table_name:
table_name = table_name.split(".")[0]
arcpy.TableToGeodatabase_conversion(table_path, self.path)#, table_name)
return
'''
def df2tbl(df, out_path):
    """Write a pandas DataFrame to an ArcGIS table at ``out_path``."""
    # Convert the frame's values into a numpy structured (record) array
    records = np.array(np.rec.fromrecords(df.values))
    # Carry the column names across as the array's field names
    records.dtype.names = tuple(df.columns.tolist())
    # Sort of surprised ESRI thought of this
    arcpy.da.NumPyArrayToTable(records, out_path)
    # ...and of course ArcMap only sees the result after a catalog refresh
    arcpy.RefreshCatalog(out_path)
    return
def domains2df(workspace):
    """Converts all domains into a dict of dataframes.

    Returns {domain_name: DataFrame with 'Key'/'Value' columns} for every
    coded-value domain in the workspace.
    """
    domain_obj = arcpy.da.ListDomains(workspace)
    # NOTE(review): DataFrame.sort() was removed in pandas 0.20+; modern
    # pandas needs sort_index() here.
    domdict = {
        d.name: pd.DataFrame.from_dict(d.codedValues, orient="index").sort()
        for d in domain_obj
        }
    for key in domdict:
        # Move the coded values out of the index into a proper column.
        domdict[key].reset_index(inplace=True)
        domdict[key].columns = ["Key", "Value"]
    return domdict


def domain2tbl(workspace, domain, output):
    # Export a single named domain as an ArcGIS table via df2tbl.
    domdict = domains2df(workspace)
    df2tbl(domdict[domain], output)
    return
def mxds2pdfs(in_folder, out_folder, verbose=False):
    """Exports all .mxd files in a folder to .pdf files in a folder.

    Args:
        in_folder (str): folder containing .mxd map documents
        out_folder (str): destination folder for the .pdf exports
        verbose (bool): print each output PDF name as it is written
    """
    for mxd_file in glob.glob("{}/*.mxd".format(in_folder)):
        mxd_file = os.path.join(in_folder, mxd_file)
        mxd = arcpy.mapping.MapDocument(mxd_file)
        # Same basename as the source document, .pdf extension
        pdf_name = os.path.basename(mxd_file).replace(".mxd", ".pdf")
        out_pdf = os.path.join(
            out_folder,
            pdf_name)
        if verbose:
            print(pdf_name)
        arcpy.mapping.ExportToPDF(mxd, out_pdf)
    return
class DataFramesWrapper(object):
    """Container for dataframes that is index-able by name and index."""
    def __init__(self, mxd):
        self.mxd = mxd

    @property
    def _dict(self):
        # Recomputed on each access so the wrapper tracks TOC changes.
        return OrderedDict([(df.name, df) for df
                            in arcpy.mapping.ListDataFrames(self.mxd)])

    @property
    def _list(self):
        return self._dict.values()

    def __getitem__(self, index):
        # Integer -> positional lookup; anything else -> name lookup.
        if type(index) is int:
            return self._list[index]
        return self._dict[index]

    def __iter__(self):
        """All dataframe objects."""
        # NOTE: itervalues() is Python 2 only.
        return self._dict.itervalues()

    def __str__(self):
        return str(self._dict)

    def __repr__(self):
        return str(self._dict)
class Map(object):
    """Convenience wrapper over the current ArcMap document's TOC."""

    def __init__(self):
        try:
            self.mxd = MXD
        except:  # NOTE(review): bare except; MXD is a module global that
            # may be None outside ArcMap -- narrow this when possible.
            self.mxd = None

    @property
    def dataframes(self):
        # NOTE(review): uses the module-global MXD, not self.mxd.
        return DataFramesWrapper(MXD)

    @property
    def count_dataframes(self):
        return len(self.dataframes._list)

    @property
    def df_layers(self):
        # {dataframe name: [layers in that dataframe]}
        return OrderedDict([(df.name, arcpy.mapping.ListLayers(df)) for df
                            in self.dataframes])

    @property
    def layers(self):
        # Flatten all dataframes' layers into one {name: layer} dict.
        all_lyrs = []
        for lyr_list in self.df_layers.values():
            all_lyrs.extend(lyr_list)
        return {lyr.name: lyr for lyr in all_lyrs}

    @property
    def layer_names(self):
        return self.layers.keys()

    def as_object(self, layer_name):
        """Returns the input layer name as an object.

        Args:
            layer_name (str): name of layer

        Use:
            city = m.as_object("City Limits")
        """
        return self.layers[layer_name]

    def rename_layer(self, old_name, new_name, dataframe=0):
        # NOTE(review): ``dataframe`` arg is currently unused.
        self.layers[old_name].name = new_name
        self.refresh()
        return

    def refresh(self):
        # Redraw the TOC and the map view after programmatic changes.
        arcpy.RefreshTOC()
        arcpy.RefreshActiveView()
        return

    def add_group_lyr(self, name, dataframe=0):
        # Add an empty group layer (from the template .lyr) and rename it.
        group_lyr = arcpy.mapping.Layer(NEW_GROUP_LAYER)
        arcpy.mapping.AddLayer(self.dataframes[dataframe], group_lyr, "TOP")
        self.rename_layer("New Group Layer", name)
        self.refresh()
        return

    def toggle_on(self, layer_name="*"):
        """Toggles the input or all ("*") layer's visibility to on."""
        if layer_name != "*":
            self.layers[layer_name].visible = True
        else:
            for lyr in self.layers.values():
                lyr.visible = True
        self.refresh()
        return

    def toggle_off(self, layer_name="*"):
        """Toggles the input or all ("*") layer's visibility to off."""
        if layer_name != "*":
            self.layers[layer_name].visible = False
        else:
            for lyr in self.layers.values():
                lyr.visible = False
        self.refresh()
        return
def spatial_field_calc(target_features, output, target_field, join_features,
                       join_field, merge_rule, match_option="INTERSECT",
                       default_value=None):
    """Adds a new field to target features via a spatial join.

    Args:
        target_features (str): features to receive the new field
        output (str): path for the joined output feature class
        target_field (str): name of the new field on the output
        join_features (str): features supplying the joined values
        join_field (str): field on join_features to aggregate
        merge_rule (str): aggregation rule (e.g. "sum", "first")
        match_option (str): spatial relationship (default "INTERSECT")
        default_value: value written where the join produced NULL

    Example:
        >>> spatial_field_calc("parcels", "in_memory/sfieldcalc",
                               "dwellings17", "permits17", "dwellings", "sum")
    """
    # Set field mappings from target_features
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(target_features)
    # Set field mappings from join_features
    join_map = arcpy.FieldMappings()
    join_map.addTable(join_features)
    # Edit the output fieldmap
    field_map = join_map.getFieldMap(join_map.findFieldMapIndex(join_field))
    jfield = field_map.outputField
    # Name output field
    jfield.name = target_field
    jfield.aliasName = target_field
    # Overwrite old field data with new
    field_map.outputField = jfield
    field_map.mergeRule = merge_rule
    # Add the edited join_field fieldmap from join_features to target_features
    fieldmappings.addFieldMap(field_map)
    # Execute the spatial join
    result = arcpy.SpatialJoin_analysis(target_features, join_features, output,
                                        "#", "#", fieldmappings,
                                        match_option=match_option)
    # Convert NULL values to default_value
    with arcpy.da.UpdateCursor(output, [target_field]) as cur:
        for row in cur:
            if row[0] is None:
                row[0] = default_value
                cur.updateRow(row)
    return result
class TableOfContents(object):
    """Table of Contents Object.

    Thin wrapper around ``arcpy.mapping`` for interrogating and editing
    the layers/tables of a map document.
    """

    def __init__(self, mxd="CURRENT"):
        self.mxd_name = mxd
        self.mxd = None
        # Allow TableOfContents(None) outside of ArcMap
        if self.mxd_name:
            self.set_mxd(self.mxd_name)

    def set_mxd(self, mxd):
        """Point this TOC at another map document ("CURRENT" or a path)."""
        self.mxd_name = mxd
        self.mxd = arcpy.mapping.MapDocument(self.mxd_name)

    def as_featurelyr(self, layer_name):
        """Gets a layer as a feature layer (e.g. make selections on it)."""
        flyr_name = layer_name + "_fclyr"
        arcpy.MakeFeatureLayer_management(self[layer_name], flyr_name)
        return flyr_name

    @property
    def dataframes(self):
        return arcpy.mapping.ListDataFrames(self.mxd)

    @property
    def contents(self):
        """Mapping of name -> layer/table object for the whole document."""
        cont = {lyr.name: lyr for lyr in arcpy.mapping.ListLayers(self.mxd)}
        cont.update({tbl.name: tbl for tbl in
                     arcpy.mapping.ListTableViews(self.mxd)})
        return cont

    @property
    def features_selected(self):  # TODO: assert actually selected not total
        # {layer name: feature count}; GetCount returns the selection count
        # only when a selection exists, otherwise the total.
        sel = {}
        for lyr in self.contents.values():
            d = {lyr.name: int(arcpy.GetCount_management(lyr).getOutput(0))}
            sel.update(d)
        return sel

    def add_fc(self, fc_path, df_idx=0, loc="TOP"):
        """Wraps the rediculous process of adding data to an mxd"""
        new_lyr = arcpy.mapping.Layer(fc_path)
        arcpy.mapping.AddLayer(self.dataframes[df_idx], new_lyr, loc)
        return

    def remove(self, layer_name):
        """Removes layer from TOC by name."""
        for df in self.dataframes:
            try:
                # Fix: operate on THIS instance's contents; the original
                # referenced the module-global ``TOC``, which may wrap a
                # different (or no) document.
                arcpy.mapping.RemoveLayer(df, self.contents[layer_name])
            except Exception:
                # Layer not present in this dataframe -- try the next.
                pass
        return

    def __getitem__(self, key):
        """Support dict-style item getting."""
        return self.contents[key]
# Module-level TOC singleton; only bound to a document inside ArcMap.
if is_active():
    TOC = TableOfContents()
else:
    # Outside ArcMap: construct with no document so imports still work.
    TOC = TableOfContents(None)
# =============================================================================
# LOCKS
def get_locks(gdb):
    """Yield the names of .lock files in ``gdb`` that are currently held.

    A lock file that cannot be opened for writing is assumed to be held
    by another ArcGIS process.

    Args:
        gdb (str): path to a file geodatabase (a folder on disk)
    """
    # TODO: change to `glob(os.path.join(gdb, "*.lock"))`
    locks = [f for f in os.listdir(gdb) if ".lock" in f]
    for lock in locks:
        try:
            # Probe the lock file itself. (The original opened the gdb
            # *folder* for write, which always raises IOError and so
            # reported every lock as held.) Append mode avoids
            # truncating the file when the open succeeds.
            with open(os.path.join(gdb, lock), "a"):
                pass
        except IOError:
            yield lock
def get_lock_users(gdb):
    """Lists the users holding locks on a gdb."""
    # Lock names look like "<table>.<user>.<pid>.lock"; the user is the
    # second dot-separated token.
    users = {lock.split(".")[1] for lock in get_locks(gdb)}
    return list(users)
# =============================================================================
# STRING FORMATTERS
def in_dataset(path):
    """Return True if ``path`` appears to sit inside a feature dataset.

    A feature class directly under a ``.gdb`` has the geodatabase itself
    as its parent; anything nested deeper is assumed to be in a dataset.
    """
    parent = os.path.split(path)[0]
    return not parent.endswith(".gdb")
def rm_ds(dataset_path):
    """Removes the dataset name from a GDB path.

    ``db.gdb/dataset/fc`` -> ``db.gdb/fc``; a path already directly
    under the geodatabase is returned unchanged.
    """
    parent, leaf = os.path.split(dataset_path)
    if parent.endswith(".gdb"):
        # Already sits directly in the gdb -- nothing to strip.
        return dataset_path
    # Drop the dataset component and rejoin gdb + feature class name.
    return os.path.join(os.path.split(parent)[0], leaf)
def unc_path(drive_path, unc_path):
    """Replaces a mapped network drive with a UNC path.

    Shells out to Windows ``net use`` to discover which UNC share the
    drive letter is mapped to (Windows only).

    Args:
        drive_path (str): path beginning with a mapped drive letter
        unc_path (str): unused -- NOTE(review): parameter is never read;
            the UNC root is taken from ``net use`` output instead.

    Example:
        >>> unc_path('I:/workspace', r'\\cityfiles\stuff')
        '\\\\cityfiles\\stuff\\workspace'
    """
    drive_path = drive_path.replace("/", "\\")
    drive = os.path.splitdrive(drive_path)[0]
    # 0x08000000 = CREATE_NO_WINDOW: suppress the console flash
    p = Popen("net use", stdout=PIPE, creationflags=0x08000000)
    raw_result = p.communicate()[0]
    # Pull the line for this drive letter out of the `net use` table
    result = re.findall("{}(.*)\r".format(drive), raw_result)[0]
    unc = result.strip().split(" ")[0]
    return drive_path.replace(drive, unc)
# =============================================================================
# TABLE UTILITIES
def fill_na(fc, fields, repl_value=0):
    """Update '<Null>' values (None) in input fields.

    Args:
        fc (str): name or path of input feature class
        fields (list): list of fields to replace NULL with 'repl_value'
        repl_value (many): value to replace NULL

    Raises:
        AttributeError: if a field name is misspelled or not editable
        TypeError: if repl_value's type does not match every field's type
    """
    desc_fields = arcpy.Describe(fc).fields
    field_objs = [f for f in desc_fields if f.name in fields]
    if len(field_objs) != len(fields):
        raise AttributeError("Check spelling of field names")
    # Make sure fields are editable
    are_editable = [f.editable for f in field_objs]
    if not all(are_editable):
        ne_fields = [f.name for f in field_objs if not f.editable]
        raise AttributeError("Field(s) not editable: {}".format(ne_fields))
    # Make sure repl_value matches type of all input fields
    # (type_map translates Python type names to ArcGIS field types)
    m = [f.type in type_map[type(repl_value).__name__] for f in field_objs]
    if not all(m):
        raise TypeError("Replace value and column types do not match")
    # Change the NULL values (None) to 0
    with arcpy.da.UpdateCursor(fc, fields) as cur:
        for row in cur:
            for v in row:
                if v is None:
                    # NOTE: row.index(v) finds the FIRST None each time,
                    # which is correct here since each is replaced in turn.
                    row[row.index(v)] = repl_value
            cur.updateRow(row)
    return
def tbl2df(tbl, fields=["*"]):
    """Loads a table or featureclass into a pandas dataframe.

    Args:
        tbl (str): table or featureclass path or name (in Arc Python Window)
        fields (list): names of fields to load; value of '*' loads all fields

    Returns:
        pandas.DataFrame with one row per record and the cursor's fields
        as columns. Note: row-at-a-time concat is slow for large tables;
        see ogdb2df for a faster alternative.
    """
    # List holds each row as a transposed dataframe
    frames = []
    if fields == ["*"] or fields == "*":
        fields = [f.name for f in arcpy.Describe(tbl).fields]
    with arcpy.da.SearchCursor(tbl, fields) as cur:
        for row in cur:
            row_df = pd.DataFrame(list(row)).T
            row_df.columns = cur.fields
            frames.append(row_df)
    # Make a single dataframe from the list
    df = pd.concat(frames)
    df.reset_index(inplace=True, drop=True)
    return df
def ogdb2df(fc_path, fields=["*"]):
    """Open ESRI GDB data as a pandas dataframe (uses osgeo/OpenFileGDB).

    This option can be much faster than tbl2df.

    Args:
        fc_path (str): path to a feature class inside a gdb
        fields (list): names of fields to load; value of '*' loads all fields
    """
    # OpenFileGDB cannot address feature datasets; strip that component.
    fc_path = rm_ds(fc_path)
    driver = ogr.GetDriverByName("OpenFileGDB")
    gdb_path, fc_name = os.path.split(fc_path)
    gdb = driver.Open(gdb_path)
    fc = gdb.GetLayerByName(fc_name)
    schema = fc.schema
    if fields == ["*"] or fields == "*":
        fields = [f.name for f in schema]
    frames = []
    # Walk features one at a time; each becomes a one-row frame.
    feat = fc.GetNextFeature()
    while feat:
        row = [feat.GetField(f) for f in fields]
        row_df = pd.DataFrame(row).T
        row_df.columns = fields
        frames.append(row_df)
        feat = fc.GetNextFeature()
    df = pd.concat(frames)
    df.index = range(len(df))
    return df
def tbl2excel(tbl, out_path, fields=["*"]):
    """Exports an input table or feature class to Excel."""
    df = tbl2df(tbl, fields)
    df.to_excel(out_path)
    return


def groupby(fc, gb_field, summary_field):
    # Sum ``summary_field`` grouped by ``gb_field``; returns a DataFrame.
    fields = [gb_field, summary_field]
    df = tbl2df(fc, fields)
    return df.groupby(gb_field).sum()
def drop_all(fc, keep=[]):
    """Drops all nonrequired columns except those specified.

    Fields that cannot be deleted (required fields like OBJECTID/Shape)
    are collected and reported rather than raising.
    """
    warnings = []
    fields = [f.name for f in arcpy.ListFields(fc)]
    # TODO: what about difference between keep and all_fields?
    # NOTE(review): symmetric_difference also includes names in ``keep``
    # that do not exist on the fc; those will land in ``warnings``.
    rm_fields = list(set(fields).symmetric_difference(set(keep)))
    for field in rm_fields:
        try:
            arcpy.DeleteField_management(fc, field)
        except Exception:  # TODO:
            warnings.append(field)
    print("Field(s) could not be removed: {}".format(warnings))
    return
def field_value_set(fc, field):
    """Return the set of distinct values found in ``field`` of ``fc``."""
    s = set()
    with arcpy.da.SearchCursor(fc, field) as cur:
        for row in cur:
            s.add(row[0])
    return s
def is_unique(fc, fields):
    """Checks if fields of a feature class have all unique values.

    Args:
        fc (str): feature class name or path
        fields (str or list): field name(s); multiple fields are treated
            as a composite key

    Returns:
        bool: True when no two rows share the same (composite) value.
    """
    if isinstance(fields, str):
        fields = [fields]
    seen = set()
    row_cnt = 0
    with arcpy.da.SearchCursor(fc, fields) as cur:
        for row in cur:
            row_cnt += 1
            # Hash the whole row so multi-field (composite) checks work;
            # the original only inspected row[0] and silently ignored
            # any additional fields.
            seen.add(tuple(row))
    return len(seen) == row_cnt
def max_in_list(find_str, in_list, digits=2):
    """Find the field containing a substring and the largest number.

    Good for finding the max year of a series of fields.

    Args:
        find_str (str): substring of field name; use '' if for only max
        in_list (list): a list of field names to search
        digits (int): number of consecutive digits to look for (default 2)
    Returns the field name containing the max number
    Use:
        >>> fields = ["Year", "Pop10", "Pop20", "Pop30", "Average60"]
        >>> max_in_list("Pop", fields)
        "Pop30"
        >>> max_in_list("", fields)
        "Average60"
    """
    # Build the pattern with %-formatting: the original
    # "\d{}".format(digits) consumed the braces and produced the literal
    # regex "\d2", so the filter never matched "Pop10" etc.
    filt_re = r"\d{%d}" % digits
    # Filter out fields without an N-digit number
    filtered_list = [f for f in in_list if re.findall(filt_re, f)]
    if not filtered_list:
        raise AttributeError(
            "No list value contains a {}-digit number".format(digits))
    # Use the same ``digits``-aware pattern here (was hardcoded \d{2});
    # the stray Python-2 debug print was removed.
    m = max([int(re.findall(filt_re, i)[0]) for i in filtered_list
             if find_str in i])
    return [i for i in in_list if str(m) in i][0]
def sum_field(fc, field):
    """Returns the sum of a field."""
    with arcpy.da.SearchCursor(fc, field) as cur:
        total = 0
        for row in cur:
            # NOTE(review): NULL rows (None) would raise here -- assumes
            # the field has no NULLs; run fill_na first if unsure.
            total += row[0]
    return total
def list_all_fields(fc):
    """Returns a list of all fields, includes joined fields."""
    fields = [f.name for f in arcpy.Describe(fc).fields]
    return fields


def list_joins(fc):
    """Returns a set of tables currently joined to a feature class."""
    # Joined field names look like "table$.field"; collect the table part.
    fields = list_all_fields(fc)
    s = set()
    [s.add(j.split("$")[0]) for j in fields if "$" in j]
    return s
def oid_by_regex(fc, regex, field, oid_field="OBJECTID"):
    """Yields record oids where field value matches regex."""
    with arcpy.da.SearchCursor(fc, [oid_field, field]) as cur:
        for row in cur:
            # Skip NULL values; findall needs a string.
            if row[1] and re.findall(regex, row[1]):
                yield row[0]


def layer_by_regex(regex):
    """Returns the full name of a layer based on a substring or regex."""
    # Searches the module-global TOC; returns the first (arbitrary) match.
    for layer in TOC.contents.keys():
        if re.findall("(?i){}".format(regex), layer):
            return layer


def regex_selection(fc, regex, field, id_field="OBJECTID"):
    """For when LIKE statements just don't cut the '(?i)mustard'.

    Selects features of ``fc`` whose ``field`` matches ``regex``.
    """
    ids = list(oid_by_regex(fc, regex, field, id_field))
    if not ids:
        raise IOError("Nothing found")
    in_qry = "{} IN ({})".format(id_field, ', '.join([str(i) for i in ids]))
    arcpy.SelectLayerByAttribute_management(fc, where_clause=in_qry)
    return


def field_by_regex(fc, field_regex, escape_tables=True):
    """Returns a list of field names matching a regular expression."""
    for f in arcpy.Describe(fc).fields:
        if escape_tables:
            # Escape the "$." of joined-field names so it matches literally.
            field_regex = field_regex.replace("$.", "\\$\\.")
        if re.findall(field_regex, f.name):
            yield f.name
def select_random(layer, field, sample_size, filter_lambda=None):
    """Selects a random number of features from a layer.

    Args:
        layer (str): name of a layer in the TOC
        field (str): name of a field/attribute in the layer
        sample_size (int): number of random features to select
        filter_lambda (function): optionally filter the set using a function

    Example:
        # Select 10 random parcels that do not have a "7" at position -4
        # in the 'ParcelID' field
        >>> select_random("Parcels", "ParcelID", 10, lambda x: x[-4] <> "7")
    """
    # TODO: allow ADD_TO_SELECTION option
    # TODO: allow lambda access to any field value
    # Get the layer as a dataframe of unique values
    df = tbl2df(layer, field).drop_duplicates()
    # Create empty set
    s = set()
    while len(s) < sample_size:
        # Add a distinct random value to the set
        s.add(random.choice(df[field].tolist()))
    # Optionally reduce the set using an input function
    # (note: this can leave fewer than sample_size selected)
    if filter_lambda:
        s = set(filter(filter_lambda, s))
    # Select the features in the set
    arcpy.SelectLayerByAttribute_management(
        layer,
        "NEW_SELECTION",
        # Create a WHERE IN statement
        # e.g. `"ParcelID" IN ('040000', '040001')`
        # Fix: use the ``field`` argument; this previously hardcoded
        # "ParcelID" and so failed for any other field.
        "\"{field}\" IN ({values})".format(
            field=field,
            values=", ".join(["'" + v + "'" for v in s])))
    return
# =============================================================================
# QUERIES
def like_list(field, values, case="", condition="OR"):
    """Make a `<field> LIKE '%value%'` string for list of values.

    Args:
        field (str): field to use in LIKE statement; may need to be quoted
        values (iterable): values to convert to LIKE query
        case (str): optionally convert values to title, upper, or lower
        condition (str): 'AND' or 'OR' (default 'OR')

    Returns joined string.

    Usage:
        >>> like_list('"Subdivision"', ["Ranch", "Apple"], case="upper")
        '"Subdivision" LIKE \'%RANCH%\' OR "Subdivision" LIKE \'%APPLE%\''
    """
    # Dispatch table replaces the if/elif chain for the case transform
    transforms = {"title": str.title, "upper": str.upper, "lower": str.lower}
    transform = transforms.get(case.lower())
    if transform is not None:
        values = [transform(v) for v in values]
    clauses = ["{0} LIKE '%{1}%'".format(field, v) for v in values]
    return " {} ".format(condition).join(clauses)
# =============================================================================
# FIELD MAPS
# Note: a field map is a string describing a field and its merge rules
# Note: a field mapping is a list of field maps joined by ';'
# TODO: remove?
'''
def get_fieldmap(fc):
"""Get current fieldmapping as list."""
mappings = arcpy.FieldMappings()
mappings.addTable(fc)
def make_fieldmap(fc, field, rename=None, merge_rule="First"):
"""Easy manipulation of FieldMap/Mappings. Not a valid FieldMap object."""
m = arcpy.FieldMap()
m.mergeRule = merge_rule
maps = arcpy.FieldMappings()
full_f_name = list(regex_fields(fc, field))[0]
m.addInputField(fc, full_f_name)
maps.addFieldMap(m)
s = maps.exportToString()
if rename:
regex_name = re.sub("\$\.", "\\$\\.", full_f_name)
regex = "{}(?!,)".format(regex_name)
s = re.sub(regex, rename, s)
return s
def make_fieldmaps(fc, fields):
if isinstance(fields, dict):
for field, rename in fields.items():
yield make_fieldmap(fc, field, rename)
else:
for field in fields:
yield make_fieldmap(fc, field)
def join_fieldmaps(maps):
return ";".join(maps)
def get_field_type(fc, field):
"""Returns a set of value types found within a field."""
s = set()
with arcpy.da.SearchCursor(fc, field) as cur:
for row in cur:
s.add(type(row[0]).__name__)
return s
'''
# TODO: 'spatial join' that copies a field from the selected to the
# intersecting features
# =============================================================================
# WRAPPERS
# Wrappers are particularly engineered for use in ArcMaps' Python Window
def fc2fc(in_fc, full_out_path, where=None, limit_fields=None):
    """Wraps 'arcpy.FeatureClassToFeatureClass_conversion with a short name.

    Args:
        in_fc (str): input feature class
        full_out_path (str): full output path (workspace + name combined)
        where (str): optional WHERE clause filtering exported records
        limit_fields: currently unused (see TODO below)
    """
    full_out_path = full_out_path.replace("\\", "/")
    out_path, out_name = os.path.split(full_out_path)
    mapping = None
    # TODO:
    #if limit_fields:
    #    mapping = limit_fields(in_fc, limit_fields)
    return arcpy.FeatureClassToFeatureClass_conversion(
        in_fc, out_path, out_name, where, mapping)
class GDBPkg(object):
    def __init__(self, out_location, gdb_name):
        """Create a template for a file geodatabase and make all at once.

        Stage datasets with ``add_dataset`` and feature classes with
        ``add_feature``, then call ``make`` to build everything.
        """
        self.out_location = out_location
        self.name = gdb_name
        if not gdb_name.endswith(".gdb"):
            self.name = gdb_name + ".gdb"
        # Staged [out_name, feature_path, dataset] triples
        self.contents = []
        # Staged [name, refsys] pairs
        self.datasets = []
        # Validate
        if not os.path.exists(self.out_location):
            raise IOError("Out location does not exist")
        if self.exists:
            raise IOError("GDB already exists")

    @property
    def path(self):
        """Output path for staged GDB."""
        return os.path.join(self.out_location, self.name)

    @property
    def exists(self):
        if os.path.exists(self.path):
            return True
        return False

    def add_feature(self, out_name, feature_path, dataset=""):
        """Stages a feature class for import."""
        self.contents.append([out_name, feature_path, dataset])
        return

    def add_dataset(self, name, refsys=0):
        """Stages a feature dataset for creation."""
        self.datasets.append([name, refsys])
        return

    def make(self):
        """Create the staged GDB."""
        # Create GDB
        arcpy.CreateFileGDB_management(self.out_location, self.name)
        # Create Feature Datasets
        for ds_name, refsys in self.datasets:
            arcpy.CreateFeatureDataset_management(self.path, ds_name, refsys)
        # Import Feature Classes
        for fc_name, f_path, dataset in self.contents:
            if dataset:
                # Guard against typos: the target dataset must be staged.
                if dataset not in [ds[0] for ds in self.datasets]:
                    raise IOError("{} not a dataset".format(dataset))
                arcpy.FeatureClassToFeatureClass_conversion(
                    f_path, os.path.join(self.path, dataset), fc_name)
            else:
                arcpy.FeatureClassToFeatureClass_conversion(
                    f_path, self.path, fc_name)
        return
class QueryFile(object):
    """Wraps RawConfigParser to make accessing stored queries easy."""
    # NOTE: uses the Python 2 ``ConfigParser`` module imported at top of file.
    def __init__(self, path):
        self.path = path
        self._cfg = RawConfigParser()
        self._cfg.read(self.path)

    def get(self, section, option):
        """Gets the option from the section in the file.

        Passing option="all" ORs together every query in the section.
        Newlines in stored queries are flattened to spaces.
        """
        if option.lower() == "all":
            all_qs = ["({})".format(self._cfg.get(section, opt))
                      for opt in self._cfg.options(section)]
            q = " OR ".join(all_qs)
        else:
            q = self._cfg.get(section, option)
        return q.replace("\n", " ")
# Source:
# https://blogs.esri.com/esri/arcgis/2013/04/23/updating-arcgis-com-hosted-feature-services-with-python/
class Service(object):
    def __init__(self, mxd_file, host="My Hosted Services", con="",
                 service_type="FeatureServer", enable_caching=False,
                 allow_overwrite=True, capabilities=["Query"]):
        """Uploads an MXD as a Web Service.

        Creates a service definition draft (.sddraft) from the MXD, then
        edits its XML to set the service type, caching, capabilities, and
        overwrite behavior. Call ``upload`` to stage and publish.

        Args:
            mxd_file (str): path to the map document; its Title metadata
                is used as the service name and must not be blank
            host (str): publishing host name
            con (str): connection name; defaults to host upper-snake-cased
            service_type (str): e.g. "FeatureServer" or "MapServer"
            enable_caching (bool): toggle the service's isCached property
            allow_overwrite (bool): mark the definition as a replacement
            capabilities (list): web capabilities, e.g. ["Query"]
        """
        self.mxd = arcpy.mapping.MapDocument(mxd_file)
        if self.mxd.title == "":
            raise IOError("MXD Title (metadata) cannot be blank")
        self.host = host
        # Fix: always assign self.con. Previously it was only set when
        # ``con`` was empty, so passing an explicit connection name left
        # the attribute undefined and CreateMapSDDraft raised.
        if not con:
            con = self.host.upper().replace(" ", "_")
        self.con = con
        self.sdd = os.path.abspath("{}.sddraft".format(self.mxd.title))
        self.sd = os.path.abspath("{}.sd".format(self.mxd.title))
        self.analysis = arcpy.mapping.CreateMapSDDraft(
            self.mxd, self.sdd, self.mxd.title, self.con)
        if self.analysis["errors"]:
            raise Exception(self.analysis["errors"])
        # DOM Editing
        self.doc = DOM.parse(self.sdd)
        self._set_service_type(service_type)
        self._set_caching(enable_caching)
        self._set_web_capabilities(capabilities)
        self._set_overwrite(allow_overwrite)

    def update_draft(self):
        """Write the edited DOM back to the .sddraft file."""
        with open(self.sdd, "w") as f:
            self.doc.writexml(f)
        return

    def _set_caching(self, enable_caching):
        # Flip the "isCached" key in the ConfigurationProperties array.
        cache = str(enable_caching).lower()
        conf = 'ConfigurationProperties'
        configProps = self.doc.getElementsByTagName(conf)[0]
        propArray = configProps.firstChild
        propSets = propArray.childNodes
        for propSet in propSets:
            keyValues = propSet.childNodes
            for keyValue in keyValues:
                if keyValue.tagName == 'Key':
                    if keyValue.firstChild.data == "isCached":
                        keyValue.nextSibling.firstChild.data = cache
        return

    def _set_service_type(self, service_type):
        # Replace the default MapServer type with the requested one.
        typeNames = self.doc.getElementsByTagName('TypeName')
        for typeName in typeNames:
            if typeName.firstChild.data == "MapServer":
                typeName.firstChild.data = service_type
        return

    def _set_web_capabilities(self, capabilities):
        """Sets the web capabilities.

        Args:
            capabilities (list): list of capabilities
        """
        capability = ",".join(capabilities)
        configProps = self.doc.getElementsByTagName('Info')[0]
        propSets = configProps.firstChild.childNodes
        for propSet in propSets:
            keyValues = propSet.childNodes
            for keyValue in keyValues:
                if keyValue.tagName == 'Key':
                    if keyValue.firstChild.data == "WebCapabilities":
                        keyValue.nextSibling.firstChild.data = capability
        return

    def _set_overwrite(self, overwrite):
        # NOTE(review): the ``overwrite`` argument is currently ignored;
        # the draft is always marked as a replacement. Kept as-is to avoid
        # changing publish behavior.
        replace = "esriServiceDefinitionType_Replacement"
        tagsType = self.doc.getElementsByTagName('Type')
        for tagType in tagsType:
            if tagType.parentNode.tagName == 'SVCManifest':
                if tagType.hasChildNodes():
                    tagType.firstChild.data = replace
        tagsState = self.doc.getElementsByTagName('State')
        for tagState in tagsState:
            if tagState.parentNode.tagName == 'SVCManifest':
                if tagState.hasChildNodes():
                    tagState.firstChild.data = "esriSDState_Published"
        return

    def upload(self):
        """Stage the edited draft into a .sd and publish it publicly."""
        self.update_draft()
        arcpy.StageService_server(self.sdd, self.sd)
        arcpy.UploadServiceDefinition_server(
            self.sd, self.host, self.mxd.title,
            "", "", "", "", "OVERRIDE_DEFINITION",
            "SHARE_ONLINE", "PUBLIC", "SHARE_ORGANIZATION")
        return
def get_props(doc):
    """Return the WebCapabilities list from a parsed .sddraft DOM.

    Walks the property sets under the first <Info> element and splits
    the value paired with the "WebCapabilities" key on commas.
    """
    info = doc.getElementsByTagName('Info')[0]
    for prop_set in info.firstChild.childNodes:
        for key_value in prop_set.childNodes:
            if key_value.tagName != 'Key':
                continue
            if key_value.firstChild.data == "WebCapabilities":
                return key_value.nextSibling.firstChild.data.split(",")
| mit |
fabian-paul/PyEMMA | pyemma/msm/estimators/_msm_estimator_base.py | 2 | 41856 | import numpy as _np
from msmtools import estimation as msmest
from pyemma._base.estimator import Estimator as _Estimator
from pyemma.msm import MSM as _MSM
from pyemma.msm.estimators._dtraj_stats import DiscreteTrajectoryStats as _DiscreteTrajectoryStats
from pyemma.util import types as _types
from pyemma.util.annotators import fix_docs, aliased, alias
from pyemma.util.types import ensure_dtraj_list
from pyemma.util.units import TimeUnit as _TimeUnit
from decorator import decorator as _decorator
@_decorator
def _remap_indices_coring(func, self, *args, **kwargs):
    """Since milestone counting sometimes has to truncate the discrete trajectories (eg. outliers),
    it becomes mission crucial to maintain the mapping of the current indices to the original input trajectories.
    """
    # Run the wrapped index-producing method first...
    indices = func(self, *args, **kwargs)
    # ...then shift its frame indices back by the per-trajectory offsets
    # introduced when milestone counting truncated the dtrajs.
    dtraj_offsets = self.dtrajs_milestone_counting_offsets
    if any(dtraj_offsets):  # need to remap indices?
        import numpy as np
        from pyemma.util.discrete_trajectories import _apply_offsets_to_samples
        # we handle 1d and 2d indices
        if isinstance(indices, np.ndarray) and indices.dtype == np.int_:
            # Single integer array: remap in place.
            _apply_offsets_to_samples(indices, dtraj_offsets)
        elif isinstance(indices, list) or (isinstance(indices, np.ndarray) and indices.dtype == np.object_):
            # Sequence of sample arrays (e.g. per-state indexes): remap each.
            for s in indices:
                _apply_offsets_to_samples(s, dtraj_offsets)
        else:
            raise TypeError('Indices "{}" not supported.'.format(indices))
    return indices
@fix_docs
@aliased
class _MSMEstimator(_Estimator, _MSM):
r"""Base class for different MSM estimators given discrete trajectory statistics"""
# version for serialization
__serialize_version = 0
# internal fields (eg. no estimator [ctor] or model parameter [set_model_params])
__serialize_fields = ('_active_set', '_active_state_indexes',
'_dtrajs_full', # we don't want _dtraj_active, since it is recomputed every time...
'_nstates_full',
'_is_estimated',
)
def __init__(self, lag=1, reversible=True, count_mode='sliding', sparse=False,
             connectivity='largest', dt_traj='1 step', score_method='VAMP2', score_k=10,
             mincount_connectivity='1/n', core_set=None, milestoning_method='last_core'):
    r"""Maximum likelihood estimator for MSMs given discrete trajectory statistics

    Parameters
    ----------
    lag : int
        lag time at which transitions are counted and the transition matrix
        is estimated.
    reversible : bool, optional, default = True
        If true compute reversible MSM, else non-reversible MSM.
    count_mode : str, optional, default='sliding'
        mode to obtain count matrices from discrete trajectories. One of:
        * 'sliding'   : a trajectory of length T yields T-tau overlapping
          window counts (0->tau), (1->tau+1), ..., (T-tau-1 -> T-1).
        * 'effective' : uses an estimate of transition counts that are
          statistically uncorrelated; recommended with a Bayesian MSM.
        * 'sample'    : a trajectory of length T yields T/tau non-overlapping
          counts (0->tau), (tau->2tau), ...
    sparse : bool, optional, default = False
        If true, compute count matrix, transition matrix and all derived
        quantities with sparse matrix algebra. Suggested for very large
        numbers of states (e.g. > 4000).
    connectivity : str, optional, default = 'largest'
        Connectivity mode. Only 'largest' is implemented: the active set is
        the largest reversibly connected set, and all quantities (transition
        matrix, stationary distribution, ...) are defined on that subset.
        'all' and 'none' are reserved names but currently not implemented.
    dt_traj : str, optional, default='1 step'
        Physical time between frames of the input trajectories, used by
        analysis/plotting tools. A number, whitespace and unit; permitted
        units: 'fs', 'ps', 'ns', 'us', 'ms', 's' (and their long forms).
    score_method : str, optional, default='VAMP2'
        Score used by :meth:`score`:
        * 'VAMP1' sum of singular values of the symmetrized transition matrix.
        * 'VAMP2' sum of squared singular values of the symmetrized transition matrix.
    score_k : int or None
        Maximum number of eigenvalues or singular values used in the score;
        None uses all available.
    mincount_connectivity : float or '1/n'
        Minimum number of counts to consider a connection between two states.
        Lower counts count as zero in the connectivity check and may thus
        separate the transition matrix. The default evaluates to 1/nstates.
    core_set : None (default) or array like, dtype=int
        Core set definition for milestoning MSMs. If None, state -1 (if found
        in the discrete trajectories) triggers milestone counting with an
        automatically defined core set; no effect for Voronoi-discretized
        trajectories. If given, discrete trajectories are assigned accordingly.
    milestoning_method : str
        Method for counting transitions in trajectories with unassigned
        frames. Currently only 'last_core' (assign unassigned frames to the
        last visited core).
    """
    self.lag = lag
    # set basic parameters
    self.reversible = reversible
    # sparse matrix computation wanted?
    self.sparse = sparse
    # store counting mode (lowercase)
    self.count_mode = count_mode
    if self.count_mode not in ('sliding', 'effective', 'sample'):
        raise ValueError('count mode ' + count_mode + ' is unknown.')
    # store connectivity mode (validated by the property setter)
    self.connectivity = connectivity
    # time step (parsed into a TimeUnit by the property setter)
    self.dt_traj = dt_traj
    # score
    self.score_method = score_method
    self.score_k = score_k
    # connectivity threshold
    self.mincount_connectivity = mincount_connectivity
    self.core_set = core_set
    self.milestoning_method = milestoning_method
################################################################################
# Generic functions
################################################################################
def _get_dtraj_stats(self, dtrajs):
    """ Compute raw trajectory counts.

    Handles core-set/milestoning preprocessing: if unassigned frames (-1)
    are present, a core set is derived (or validated) and the trajectories
    are rewritten to core-to-core transitions before counting.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int) or :class:`DiscreteTrajectoryStats <pyemma.msm.estimators._dtraj_stats.DiscreteTrajectoryStats>`
        discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.

    Returns
    -------
    dtrajstats : :class:`DiscreteTrajectoryStats`
        lag-counted statistics over the (possibly rewritten) trajectories.
    """
    # harvest discrete statistics
    if isinstance(dtrajs, _DiscreteTrajectoryStats):
        dtrajstats = dtrajs
    else:
        if any(-1 in d for d in dtrajs):
            # states actually observed, with the leading -1 stripped off
            # (computed once; the original evaluated this in both branches)
            observed_cores = _np.sort(_np.unique(_np.concatenate(dtrajs)))[1:]
            if self.core_set is None:
                self.core_set = observed_cores
                self.logger.warning('Empty core set while unassigned states (-1) in discrete trajectory. '
                                    'Defining core set automatically; check correctness by calling self.core_set.')
            elif set(observed_cores) != set(self.core_set):
                # fixed typo in warning: 'containts' -> 'contains'
                self.logger.warning('dtraj contains states that are not in core set definition. '
                                    'These states will be treated as unassigned.')
        if self.core_set is not None:
            # keep the untouched input around for discrete_trajectories_unmodified
            self._dtrajs_original = dtrajs
            from pyemma.util.discrete_trajectories import rewrite_dtrajs_to_core_sets
            self._dtrajs_full, self._dtrajs_milestone_counting_offsets, self.n_cores = \
                rewrite_dtrajs_to_core_sets(dtrajs, core_set=self.core_set, in_place=False)
        else:
            self._dtrajs_full = dtrajs
        # compute and store discrete trajectory statistics
        dtrajstats = _DiscreteTrajectoryStats(self._dtrajs_full)
        # check if this MSM seems too large to be dense
        if dtrajstats.nstates > 4000 and not self.sparse:
            self.logger.warning('Building a dense MSM with {nstates} states. This can be '
                                'inefficient or unfeasible in terms of both runtime and memory consumption. '
                                'Consider using sparse=True.'.format(nstates=dtrajstats.nstates))
    # count lagged
    dtrajstats.count_lagged(self.lag, count_mode=self.count_mode,
                            mincount_connectivity=self.mincount_connectivity,
                            n_jobs=getattr(self, 'n_jobs', None),
                            show_progress=getattr(self, 'show_progress', False),
                            name=self.name,
                            core_set=self.core_set, milestoning_method=self.milestoning_method)
    # for other statistics
    return dtrajstats
def estimate(self, dtrajs, **kwargs):
    """ Run the estimation on the given discrete trajectories.

    Parameters
    ----------
    dtrajs : list containing ndarrays(dtype=int) or ndarray(n, dtype=int) or :class:`DiscreteTrajectoryStats <pyemma.msm.estimators._dtraj_stats.DiscreteTrajectoryStats>`
        discrete trajectories, stored as integer ndarrays (arbitrary size)
        or a single ndarray for only one trajectory.
    **kwargs :
        Other keyword parameters if different from the settings when this
        estimator was constructed.

    Returns
    -------
    MSM : :class:`pyemma.msm.MSM`
        Note that this class is specialized by the used estimator, eg. it has
        more functionality than the plain MSM class.
    """
    dtrajs = ensure_dtraj_list(dtrajs)  # ensure format
    # the actual work happens in the subclass _estimate(), dispatched by Estimator
    return super(_MSMEstimator, self).estimate(dtrajs, **kwargs)
def _check_is_estimated(self):
    # Guard for accessors that require a fitted model.
    # NOTE(review): `assert` is stripped under `python -O`; this guard is
    # advisory only — confirm whether a hard exception is desired.
    assert self._is_estimated, 'You tried to access model parameters before estimating it - run estimate first!'
def score(self, dtrajs, score_method=None, score_k=None):
    """ Scores the MSM using the dtrajs using the variational approach for Markov processes [1]_ [2]_

    Currently only implemented using dense matrices - will be slow for large state spaces.

    Parameters
    ----------
    dtrajs : list of arrays
        test data (discrete trajectories).
    score_method : str, optional, default=None
        Overwrite scoring method if desired. If `None`, the estimator's
        scoring method will be used.
        Available scores are based on the variational approach for Markov
        processes [1]_ [2]_ :
        * 'VAMP1' Sum of singular values of the symmetrized transition matrix [2]_ .
          If the MSM is reversible, this is equal to the sum of transition
          matrix eigenvalues, also called Rayleigh quotient [1]_ [3]_ .
        * 'VAMP2' Sum of squared singular values of the symmetrized transition
          matrix [2]_ . If the MSM is reversible, this is equal to the kinetic
          variance [4]_ .
    score_k : int or None
        Overwrite scoring rank if desired. If `None`, the estimator's scoring
        rank will be used. The maximum number of eigenvalues or singular
        values used in the score.

    References
    ----------
    .. [1] Noe, F. and F. Nueske: A variational approach to modeling slow processes
        in stochastic dynamical systems. SIAM Multiscale Model. Simul. 11, 635-655 (2013).
    .. [2] Wu, H and F. Noe: Variational approach for learning Markov processes
        from time series data (in preparation)
    .. [3] McGibbon, R and V. S. Pande: Variational cross-validation of slow
        dynamical modes in molecular kinetics, J. Chem. Phys. 142, 124105 (2015)
    .. [4] Noe, F. and C. Clementi: Kinetic distance and kinetic maps from molecular
        dynamics simulation. J. Chem. Theory Comput. 11, 5002-5011 (2015)
    """
    dtrajs = ensure_dtraj_list(dtrajs)  # ensure format
    # reset estimator data if needed (note: overrides persist on the estimator)
    if score_method is not None:
        self.score_method = score_method
    if score_k is not None:
        self.score_k = score_k
    # determine actual scoring rank
    if self.score_k is None:
        self.score_k = self.nstates
    if self.score_k > self.nstates:
        self.logger.warning('Requested scoring rank {rank} exceeds number of MSM states. '
                            'Reduced to score_k = {nstates}'.format(rank=self.score_k, nstates=self.nstates))
        self.score_k = self.nstates  # limit to nstates
    # training data
    K = self.transition_matrix  # model
    C0t_train = self.count_matrix_active
    from scipy.sparse import issparse
    if issparse(K):  # can't deal with sparse right now.
        K = K.toarray()
    if issparse(C0t_train):  # can't deal with sparse right now.
        C0t_train = C0t_train.toarray()
    C00_train = _np.diag(C0t_train.sum(axis=1))  # empirical cov
    Ctt_train = _np.diag(C0t_train.sum(axis=0))  # empirical cov
    # test data
    C0t_test_raw = msmest.count_matrix(dtrajs, self.lag, sparse_return=False)
    # map to present active set: test counts may cover fewer states than training
    map_from = self.active_set[_np.where(self.active_set < C0t_test_raw.shape[0])[0]]
    map_to = _np.arange(len(map_from))
    C0t_test = _np.zeros((self.nstates, self.nstates))
    C0t_test[_np.ix_(map_to, map_to)] = C0t_test_raw[_np.ix_(map_from, map_from)]
    C00_test = _np.diag(C0t_test.sum(axis=1))
    Ctt_test = _np.diag(C0t_test.sum(axis=0))
    # score
    from pyemma.util.metrics import vamp_score
    return vamp_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test,
                      k=self.score_k, score=self.score_method)
def _blocksplit_dtrajs(self, dtrajs, sliding):
    """Split trajectories into blocks of at least one lag time (helper for score_cv)."""
    from pyemma.msm.estimators._dtraj_stats import blocksplit_dtrajs
    blocks = blocksplit_dtrajs(dtrajs, lag=self.lag, sliding=sliding)
    return blocks
def score_cv(self, dtrajs, n=10, score_method=None, score_k=None):
    """ Scores the MSM using the variational approach for Markov processes [1]_ [2]_ and crossvalidation [3]_ .

    Divides the data into training and test data, fits a MSM using the training
    data using the parameters of this estimator, and scores is using the test
    data.
    Currently only one way of splitting is implemented, where for each n,
    the data is randomly divided into two approximately equally large sets of
    discrete trajectory fragments with lengths of at least the lagtime.
    Currently only implemented using dense matrices - will be slow for large state spaces.

    Parameters
    ----------
    dtrajs : list of arrays
        Test data (discrete trajectories).
    n : int
        Number of repetitions of the cross-validation. Use large n to get solid
        means of the score.
    score_method : str, optional, default=None
        Overwrite scoring method to be used if desired. If `None`, the
        estimator's scoring method will be used. See :meth:`score` for the
        available scores ('VAMP1', 'VAMP2').
    score_k : int or None
        The maximum number of eigenvalues or singular values used in the
        score. If set to None, all available eigenvalues will be used.

    References
    ----------
    .. [1] Noe, F. and F. Nueske: A variational approach to modeling slow processes
        in stochastic dynamical systems. SIAM Multiscale Model. Simul. 11, 635-655 (2013).
    .. [2] Wu, H and F. Noe: Variational approach for learning Markov processes
        from time series data (in preparation).
    .. [3] McGibbon, R and V. S. Pande: Variational cross-validation of slow
        dynamical modes in molecular kinetics, J. Chem. Phys. 142, 124105 (2015).
    """
    dtrajs = ensure_dtraj_list(dtrajs)  # ensure format
    from pyemma.msm.estimators._dtraj_stats import cvsplit_dtrajs
    if self.count_mode not in ('sliding', 'sample'):
        raise ValueError('score_cv currently only supports count modes "sliding" and "sample"')
    sliding = self.count_mode == 'sliding'
    scores = []
    from pyemma._ext.sklearn.base import clone
    # fit on a clone so this estimator's state is left untouched
    estimator = clone(self)
    for i in range(n):
        # fresh random train/test split per repetition
        dtrajs_split = self._blocksplit_dtrajs(dtrajs, sliding)
        dtrajs_train, dtrajs_test = cvsplit_dtrajs(dtrajs_split)
        estimator.fit(dtrajs_train)
        s = estimator.score(dtrajs_test, score_method=score_method, score_k=score_k)
        scores.append(s)
    return _np.array(scores)
################################################################################
# Basic attributes
################################################################################
@alias("lagtime")
@property
def lag(self):
"""
The lag time at which the Markov model was estimated
"""
return self._lag
@lag.setter
def lag(self, value):
self._lag = int(value)
@property
def nstates_full(self):
    r""" Number of states in discrete trajectories (before restriction to the active set) """
    self._check_is_estimated()
    return self._nstates_full
@property
def active_set(self):
    """
    The active set of states on which all computations and estimations will be done
    """
    self._check_is_estimated()
    return self._active_set

@active_set.setter
def active_set(self, value):
    self._active_set = value
@property
def connectivity(self):
    """The connectivity mode of the MSM (currently only 'largest' is supported)."""
    return self._connectivity

@connectivity.setter
def connectivity(self, value):
    # validate the requested mode; 'all'/'none' are reserved but unimplemented
    if value == 'largest':
        pass  # this is the current default. no need to do anything
    elif value == 'all':
        raise NotImplementedError('MSM estimation with connectivity=\'all\' is currently not implemented.')
    elif value == 'none':
        raise NotImplementedError('MSM estimation with connectivity=\'none\' is currently not implemented.')
    else:
        raise ValueError('connectivity mode {} is unknown. Currently only "largest" is implemented'.format(value))
    self._connectivity = value
@property
def dt_traj(self):
    """Physical time between frames of the input trajectories, e.g. '1 step' or '2 fs'."""
    return self._dt_traj

@dt_traj.setter
def dt_traj(self, value):
    # time step
    self._dt_traj = value
    # parse into a TimeUnit for unit-aware conversions downstream
    self.timestep_traj = _TimeUnit(self.dt_traj)
@property
def largest_connected_set(self):
    """
    The largest reversible connected set of states
    """
    self._check_is_estimated()
    # connected sets are sorted by size (descending), so the first is the largest
    return self._connected_sets[0]
@property
def connected_sets(self):
    """
    The reversible connected sets of states, sorted by size (descending)
    """
    self._check_is_estimated()
    return self._connected_sets
@property
@alias('dtrajs_full')
def discrete_trajectories_full(self):
    """
    A list of integer arrays with the original (unmapped) discrete trajectories.
    With core-set milestoning, these are the rewritten (core-assigned) trajectories;
    see :attr:`discrete_trajectories_unmodified` for the untouched input.
    """
    self._check_is_estimated()
    return self._dtrajs_full
@property
@alias('dtrajs_active')
def discrete_trajectories_active(self):
    """
    A list of integer arrays with the discrete trajectories mapped to the
    connectivity mode used. For example, for connectivity='largest', the
    indexes will be given within the connected set; frames that are not in
    the connected set will be -1.
    """
    self._check_is_estimated()
    # map every full-state trajectory through the full->active lookup table
    self._dtrajs_active = [self._full2active[dtraj] for dtraj in self._dtrajs_full]
    return self._dtrajs_active
@property
def count_matrix_active(self):
    """The count matrix on the active set given the connectivity mode used.

    For example, for connectivity='largest', the count matrix is given only on
    the largest reversibly connected set.
    Attention: This count matrix has been obtained by sliding a window of
    length tau across the data. It contains a factor of tau more counts than
    are statistically uncorrelated. It's fine to use this matrix for maximum
    likelihood estimation, but it will give far too small errors if you use it
    for uncertainty calculations. In order to do uncertainty calculations, use
    the effective count matrix, see: :attr:`effective_count_matrix`

    See Also
    --------
    effective_count_matrix
        For a count matrix with effective (statistically uncorrelated) counts.
    """
    self._check_is_estimated()
    return self._C_active
@property
def count_matrix_full(self):
    """
    The count matrix on full set of discrete states, irrespective as to whether they are connected or not.

    Attention: This count matrix has been obtained by sliding a window of
    length tau across the data. It contains a factor of tau more counts than
    are statistically uncorrelated. It's fine to use this matrix for maximum
    likelihood estimation, but it will give far too small errors if you use it
    for uncertainty calculations. In order to do uncertainty calculations, use
    the effective count matrix, see: :attr:`effective_count_matrix`
    (only implemented on the active set), or divide this count matrix by tau.

    See Also
    --------
    effective_count_matrix
        For an active-set count matrix with effective (statistically uncorrelated) counts.
    """
    self._check_is_estimated()
    return self._C_full
@property
def active_state_fraction(self):
    """The fraction of all observed states that are in the active set.
    """
    self._check_is_estimated()
    return float(self._nstates) / float(self._nstates_full)
@property
def active_count_fraction(self):
    """The fraction of observed frame counts that fall into the active set.
    """
    self._check_is_estimated()
    from pyemma.util.discrete_trajectories import count_states
    # histogram of state visits over the full state space
    hist = count_states(self._dtrajs_full)
    hist_active = hist[self.active_set]
    return float(_np.sum(hist_active)) / float(_np.sum(hist))
@property
def core_set(self):
    """ list of states which are defined to lie within the core set.
    Transitions will only be considered between cores.
    """
    return self._core_set

@core_set.setter
def core_set(self, value):
    # normalize to an int vector; None disables milestoning
    self._core_set = _types.ensure_int_vector(value) if value is not None else None
@property
@alias('dtrajs_unmodified')
def discrete_trajectories_unmodified(self):
    """
    A list of integer arrays with the original and not modified discrete trajectories.

    NOTE(review): `_dtrajs_original` is only assigned when a core set is used
    during estimation; without milestoning, accessing this raises
    AttributeError — confirm whether that is intended.
    """
    self._check_is_estimated()
    return self._dtrajs_original
@property
def dtrajs_milestone_counting_offsets(self):
    """ Offsets for milestone counted trajectories for each input discrete trajectory.

    In case a trajectory does not start in a core, we need to shift it towards
    the first core state visited.

    Returns
    -------
    offsets : list of int (or None, indicating a trajectory never visits a core)
    """
    # default to an empty tuple if estimation never set the attribute
    if not hasattr(self, '_dtrajs_milestone_counting_offsets'):
        self._dtrajs_milestone_counting_offsets = ()
    return self._dtrajs_milestone_counting_offsets
################################################################################
# Generation of trajectories and samples
################################################################################
@property
def active_state_indexes(self):
    """
    Ensures that the connected states are indexed and returns the indices
    """
    self._check_is_estimated()
    if not hasattr(self, '_active_state_indexes'):
        # lazily build the (trajectory, time) index pairs for every active state
        from pyemma.util.discrete_trajectories import index_states
        self._active_state_indexes = index_states(self.discrete_trajectories_active)
    return self._active_state_indexes
@_remap_indices_coring
def generate_traj(self, N, start=None, stop=None, stride=1):
    """Generates a synthetic discrete trajectory of length N and simulation time stride * lag time * N

    This information can be used in order to generate a synthetic molecular
    dynamics trajectory - see :func:`pyemma.coordinates.save_traj`.
    Note that the time difference between two samples is the Markov model lag
    time tau. When comparing quantities computed from this synthetic
    trajectory and from the input trajectories, the time points of this
    trajectory must be scaled by the lag time in order to have them on the
    same time scale.

    Parameters
    ----------
    N : int
        Number of time steps in the output trajectory. The total simulation time is stride * lag time * N
    start : int, optional, default = None
        starting state. If not given, will sample from the stationary distribution of P
    stop : int or int-array-like, optional, default = None
        stopping set. If given, the trajectory will be stopped before N steps
        once a state of the stop set is reached
    stride : int, optional, default = 1
        Multiple of lag time used as a time step. By default, the time step is equal to the lag time

    Returns
    -------
    indexes : ndarray( (N, 2) )
        trajectory and time indexes of the simulated trajectory. Each row
        consist of a tuple (i, t), where i is the index of the trajectory and
        t is the time index within the trajectory.

    See also
    --------
    pyemma.coordinates.save_traj
        in order to save this synthetic trajectory as a trajectory file with molecular structures
    """
    # TODO: this is the only function left which does something time-related in a multiple of tau rather than dt.
    # TODO: we could generate dt-strided trajectories by sampling tau times from the current state, but that would
    # TODO: probably lead to a weird-looking trajectory. Maybe we could use a HMM to generate intermediate 'hidden'
    # TODO: frames. Anyway, this is a nontrivial issue.
    self._check_is_estimated()
    # generate synthetic states
    from msmtools.generation import generate_traj as _generate_traj
    syntraj = _generate_traj(self.transition_matrix, N, start=start, stop=stop, dt=stride)
    # map synthetic states back to (trajectory, frame) pairs of the input data
    from pyemma.util.discrete_trajectories import sample_indexes_by_sequence
    return sample_indexes_by_sequence(self.active_state_indexes, syntraj)
@_remap_indices_coring
def sample_by_state(self, nsample, subset=None, replace=True):
    """Generates samples of the connected states.

    For each state in the active set of states, generates nsample samples with
    trajectory/time indexes. This information can be used in order to generate
    a trajectory of length nsample * nconnected using
    :func:`pyemma.coordinates.save_traj` or nconnected trajectories of length
    nsample each using :func:`pyemma.coordinates.save_traj`

    Parameters
    ----------
    nsample : int
        Number of samples per state. If replace = False, the number of
        returned samples per state could be smaller if less than nsample
        indexes are available for a state.
    subset : ndarray((n)), optional, default = None
        array of states to be indexed. By default all states in the connected set will be used
    replace : boolean, optional
        Whether the sample is with or without replacement

    Returns
    -------
    indexes : list of ndarray( (N, 2) )
        list of trajectory/time index arrays with an array for each state.
        Within each index array, each row consist of a tuple (i, t), where i
        is the index of the trajectory and t is the time index within the
        trajectory.

    See also
    --------
    pyemma.coordinates.save_traj
        in order to save the sampled frames sequentially in a trajectory file with molecular structures
    pyemma.coordinates.save_trajs
        in order to save the sampled frames in nconnected trajectory files with molecular structures
    """
    self._check_is_estimated()
    # generate connected state indexes
    import pyemma.util.discrete_trajectories as dt
    return dt.sample_indexes_by_state(self.active_state_indexes, nsample, subset=subset, replace=replace)
# TODO: add sample_metastable() for sampling from metastable (pcca or hmm) states.
@_remap_indices_coring
def sample_by_distributions(self, distributions, nsample):
    """Generates samples according to given probability distributions

    Parameters
    ----------
    distributions : list or array of ndarray ( (n) )
        m distributions over states. Each distribution must be of length n and must sum up to 1.0
    nsample : int
        Number of samples per distribution. If replace = False, the number of
        returned samples per state could be smaller if less than nsample
        indexes are available for a state.

    Returns
    -------
    indexes : length m list of ndarray( (nsample, 2) )
        List of the sampled indices by distribution.
        Each element is an index array with a number of rows equal to nsample,
        with rows consisting of a tuple (i, t), where i is the index of the
        trajectory and t is the time index within the trajectory.
    """
    self._check_is_estimated()
    # generate connected state indexes
    import pyemma.util.discrete_trajectories as dt
    return dt.sample_indexes_by_distribution(self.active_state_indexes, distributions, nsample)
################################################################################
# For general statistics
################################################################################
def trajectory_weights(self):
    r"""Uses the MSM to assign a probability weight to each trajectory frame.

    This is a powerful function for the calculation of arbitrary observables
    in the trajectories one has started the analysis with. The stationary
    probability of the MSM will be used to reweigh all states. Returns a list
    of weight arrays, one for each trajectory, and with a number of elements
    equal to trajectory frames. Given :math:`N` trajectories of lengths
    :math:`T_1` to :math:`T_N`, this function returns corresponding weights:

    .. math::

        (w_{1,1}, ..., w_{1,T_1}), ..., (w_{N,1}, ..., w_{N,T_N})

    that are normalized to one:

    .. math::

        \sum_{i=1}^N \sum_{t=1}^{T_i} w_{i,t} = 1

    Suppose you are interested in computing the expectation value of a
    function :math:`a(x)`, where :math:`x` are your input configurations.
    Use this function to compute the weights of all input configurations and
    obtain the estimated expectation by:

    .. math::

        \langle a \rangle = \sum_{i=1}^N \sum_{t=1}^{T_i} w_{i,t} a(x_{i,t})

    Or if you are interested in computing the time-lagged correlation between
    functions :math:`a(x)` and :math:`b(x)` you could do:

    .. math::

        \langle a(t) b(t+\tau) \rangle_t = \sum_{i=1}^N \sum_{t=1}^{T_i} w_{i,t} a(x_{i,t}) b(x_{i,t+\tau})

    Returns
    -------
    weights : list of ndarray
        The normalized trajectory weights.
    """
    self._check_is_estimated()
    # compute stationary distribution, expanded to full set
    statdist_full = _np.zeros([self._nstates_full])
    statdist_full[self.active_set] = self.stationary_distribution
    # histogram observed states
    import msmtools.dtraj as msmtraj
    hist = 1.0 * msmtraj.count_states(self.discrete_trajectories_full)
    # simply read off stationary distribution and accumulate total weight
    W = []
    wtot = 0.0
    for dtraj in self.discrete_trajectories_full:
        # per-frame weight: stationary probability divided by observation count
        w = statdist_full[dtraj] / hist[dtraj]
        W.append(w)
        wtot += _np.sum(w)
    # normalize so all weights sum to one
    for w in W:
        w /= wtot
    # done
    return W
################################################################################
# HMM-based coarse graining
################################################################################
def hmm(self, nhidden):
    """Estimates a hidden Markov state model as described in [1]_

    Parameters
    ----------
    nhidden : int
        number of hidden (metastable) states

    Returns
    -------
    hmsm : :class:`MaximumLikelihoodHMSM`

    References
    ----------
    .. [1] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
        Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
        J. Chem. Phys. 139, 184114 (2013)
    """
    self._check_is_estimated()
    # check if the time-scale separation is OK
    # if hmm.nstates = msm.nstates there is no problem. Otherwise, check spectral gap
    if self.nstates > nhidden:
        timescale_ratios = self.timescales()[:-1] / self.timescales()[1:]
        if timescale_ratios[nhidden - 2] < 1.5:
            # small gap between kept and discarded timescales -> coarse-graining may be poor
            self.logger.warning('Requested coarse-grained model with {nhidden} metastable states at lag={lag}.'
                                ' The ratio of relaxation timescales between'
                                ' {nhidden} and {nhidden_1} states is only {ratio}'
                                ' while we recommend at least 1.5.'
                                ' It is possible that the resulting HMM is inaccurate. Handle with caution.'.format(
                lag=self.lag,
                nhidden=nhidden,
                nhidden_1=nhidden + 1,
                ratio=timescale_ratios[nhidden - 2],
            ))
    # run HMM estimate
    from pyemma.msm.estimators.maximum_likelihood_hmsm import MaximumLikelihoodHMSM
    estimator = MaximumLikelihoodHMSM(lag=self.lagtime, nstates=nhidden, msm_init=self,
                                      reversible=self.is_reversible, dt_traj=self.dt_traj)
    estimator.estimate(self.discrete_trajectories_full)
    return estimator.model
def coarse_grain(self, ncoarse, method='hmm'):
    r"""Returns a coarse-grained Markov model.

    Currently only the HMM method described in [1]_ is available for coarse-graining MSMs.

    Parameters
    ----------
    ncoarse : int
        number of coarse states
    method : str, optional, default='hmm'
        coarse-graining method; currently only 'hmm' is supported.

    Returns
    -------
    hmsm : :class:`MaximumLikelihoodHMSM`

    References
    ----------
    .. [1] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
        Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
        J. Chem. Phys. 139, 184114 (2013)
    """
    self._check_is_estimated()
    # check input: the type check must apply to the requested ncoarse
    # (the original mistakenly type-checked self.nstates instead)
    assert _types.is_int(ncoarse) and 1 < ncoarse <= self.nstates, \
        'ncoarse must be an int in [2,msmobj.nstates]'
    return self.hmm(ncoarse)
################################################################################
# MODEL VALIDATION
################################################################################
def cktest(self, nsets, memberships=None, mlags=10, conf=0.95, err_est=False,
           n_jobs=None, show_progress=True):
    """ Conducts a Chapman-Kolmogorow test.

    Parameters
    ----------
    nsets : int
        number of sets to test on
    memberships : ndarray(nstates, nsets), optional
        optional state memberships. By default (None) will conduct a cktest
        on PCCA (metastable) sets.
    mlags : int or int-array, optional
        multiples of lag times for testing the Model, e.g. range(10).
        A single int will trigger a range, i.e. mlags=10 maps to
        mlags=range(10). The setting None will choose mlags automatically
        according to the longest available trajectory
    conf : float, optional
        confidence interval
    err_est : bool, optional
        compute errors also for all estimations (computationally expensive)
        If False, only the prediction will get error bars, which is often
        sufficient to validate a model.
    n_jobs : int, default=None
        how many jobs to use during calculation
    show_progress : bool, optional
        Show progress bars for calculation?

    Returns
    -------
    cktest : :class:`ChapmanKolmogorovValidator <pyemma.msm.ChapmanKolmogorovValidator>`

    References
    ----------
    This test was suggested in [1]_ and described in detail in [2]_.

    .. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
        T. Weikl: Constructing the Full Ensemble of Folding Pathways
        from Short Off-Equilibrium Simulations.
        Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009)
    .. [2] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
        Chodera, C Schuette and F Noe. 2011. Markov models of
        molecular kinetics: Generation and validation. J Chem Phys
        134: 174105
    """
    from pyemma.msm.estimators import ChapmanKolmogorovValidator
    if memberships is None:
        # default: PCCA metastable memberships with nsets sets
        self.pcca(nsets)
        memberships = self.metastable_memberships
    ck = ChapmanKolmogorovValidator(self, self, memberships, mlags=mlags, conf=conf,
                                    n_jobs=n_jobs, err_est=err_est, show_progress=show_progress)
    ck.estimate(self._dtrajs_full)
    return ck
| lgpl-3.0 |
fmance/deep-medical-ir | classification/language-model/freqs.py | 1 | 3783 | import os
import sys
import codecs
import math
import re
import numpy as np
import scipy.stats
from collections import Counter, defaultdict
import matplotlib.pyplot as plt
sys.path.insert(0, "../../utils/")
import utils
# Task id ("diag", "test" or "treat") chosen on the command line.
CLASS_ID = sys.argv[1]
# First topic number of each task's 10-query block in the collection.
QUERY_OFFSETS = {"diag": 1, "test": 11, "treat": 21}
# Term stems whose per-document frequencies are analysed below.
TARGETS = ["diag", "test", "physic exam", "investig", "evalu", "examin", "treat", "therap"]
#TARGETS = ["treat"]
DOC_IDS = utils.readInts("../data/res-and-qrels/ids.txt")
########### TOGGLE for writing data
# One line of tokenised text per document, aligned with DOC_IDS.
LINES = codecs.open("../data/res-and-qrels/words.txt", "r", "utf-8").read().splitlines() # res-and-ALL-qrels sometimes !!
DOCS = zip(DOC_IDS, LINES)
print "read docs", len(LINES)
###########
#### TOGGLE for reading data
#LENGTHS = dict(zip(DOC_IDS, utils.readInts("word-counts/lengths.txt")))
def writeWordCountsForTarget(target):
out = open("word-counts/" + target + ".txt", "w")
counter = 0
for did, text in DOCS:
count = sum(1 for _ in re.finditer(r"\b%s" % target, text))
out.write("%d\n" % count)
counter += 1
if counter % 10000 == 0:
print counter
out.close()
def writeWordCounts():
for target in TARGETS:
print target
writeWordCountsForTarget(target)
print "----"
def writeLengths():
    """Record the whitespace-token count of every document in DOCS."""
    lengthsFile = open("word-counts/lengths.txt", "w")
    for _did, text in DOCS:
        lengthsFile.write("%d\n" % len(text.split()))
    lengthsFile.close()
# Generate the count files on disk, then load them back into memory:
# COUNTS_DICT[target][doc_id] -> occurrence count of target in that doc.
writeWordCounts()
writeLengths()
COUNTS_DICT = {}
for target in TARGETS:
    COUNTS_DICT[target] = dict(zip(DOC_IDS, utils.readInts("word-counts/" + target + ".txt")))
def measure(count, did):
    """Scoring measure for a raw term count; currently the identity.

    A length-normalised variant (count / LENGTHS[did] * 1e3) was used
    previously and can be re-enabled here without touching callers.
    """
    return count
def printFreqs(qid, dids):
print "%s|%5d:\t" % (str(qid), len(dids)),
allCounts = []
for target in TARGETS:
counts = [measure(count, did) for did, count in COUNTS_DICT[target].items() if did in dids]
medianCount = np.median(counts)
# totals[target].append(medianCount)
# weights[target].append(len(dids))
print "%s: %6.2f\t" % (target, medianCount),
allCounts.append(counts)
print
return allCounts
def printOverallStats(totals, weights):
print "OVR\t\t",
for target in TARGETS:
print "%s: %6.2f\t" % (target, np.average(totals[target], weights=weights[target])),
print
# print "MIN\t\t",
# for target in TARGETS:
# print "%s: %6.2f\t" % (target, min(totals[target])),
# print
# print "MAX\t\t",
# for target in TARGETS:
# print "%s: %6.2f\t" % (target, max(totals[target])),
# print
def wordFreq(qrels, queryRange):
    """Print per-query and overall median target-term frequencies for the
    relevant and non-relevant document sets of every query in queryRange.

    qrels maps query id -> list of (doc_id, relevance) judgements.
    """
    # Accumulators for the (currently commented-out) weighted-average report.
    relTotals = defaultdict(list)
    relWeights = defaultdict(list)
    nonrelTotals = defaultdict(list)
    nonrelWeights = defaultdict(list)
    allRelDocs = []
    allNonrelDocs = []
    for qid in queryRange:
        queryQrels = qrels[qid]
        # rel > 0 -> relevant; rel == 0 -> judged non-relevant.
        relDocs = set([did for (did, rel) in queryQrels if rel > 0])
        relCounts = printFreqs(qid, relDocs)
        nonrelDocs = set([did for (did, rel) in queryQrels if rel == 0])
        nonrelCounts = printFreqs(qid, nonrelDocs)
        # plt.hist(relCounts[0], bins=50, histtype='step')
        # plt.hist(nonrelCounts[0], bins=50, histtype='step')
        # plt.show()
        print "-" * 170
        allRelDocs += relDocs
        allNonrelDocs += nonrelDocs
    allRelDocs = set(allRelDocs)
    allNonrelDocs = set(allNonrelDocs)
    # A doc relevant for any query is excluded from the pooled non-relevant set.
    allNonrelDocs -= allRelDocs
    print "=" * 170
    allRelCounts = printFreqs("ALL", allRelDocs)
    # printOverallStats(relTotals, relWeights)
    print "-" * 170
    allNonrelCounts = printFreqs("ALL", allNonrelDocs)
    # plt.hist(allRelCounts[0], bins=50, histtype='step')
    # plt.hist(allNonrelCounts[0], bins=50, histtype='step')
    # plt.show()
    # printOverallStats(nonrelTotals, nonrelWeights)
    print "=" * 170
    print
#QRELS_2014 = utils.readQrels2014()
#QRELS_2015 = utils.readQrels2015()
#QUERY_RANGE = range(QUERY_OFFSETS[CLASS_ID], QUERY_OFFSETS[CLASS_ID] + 10)
#wordFreq(QRELS_2014, QUERY_RANGE)
#wordFreq(QRELS_2015, QUERY_RANGE)
| gpl-3.0 |
csae1152/seizure-prediction | seizure_prediction/cross_validation/legacy_strategy.py | 3 | 1468 | import numpy as np
import sklearn.cross_validation
from seizure_prediction.cross_validation.sequences import collect_sequence_ranges_from_meta
class LegacyStrategy:
    """Hand-picked random folds maintaining sequence integrity with an
    80% train / 20% cv split (see k_fold_strategy for per-method docs)."""

    def get_name(self):
        return 'legacy'

    def get_folds(self, preictal_meta):
        # Seeds hand-picked to give a good spread of splits when
        # num_sequences == 3, i.e. (0, 1), (0, 2), (1, 2) across 3 folds.
        # The newer approach lives in k_fold.py.
        return [8, 11, 14]

    def get_sequence_ranges(self, meta, fold_number, interictal=None, shuffle=None):
        # Collect contiguous sequence ranges, then split 80/20 using the
        # fold number as the RNG seed so each fold is reproducible.
        ranges = collect_sequence_ranges_from_meta(meta, shuffle=False)
        return sklearn.cross_validation.train_test_split(
            ranges, train_size=0.8, random_state=fold_number)

    def split_train_cv(self, data, meta, fold_number, interictal=False):
        train_ranges, cv_ranges = self.get_sequence_ranges(
            meta, fold_number, interictal=interictal)

        def assemble(ranges):
            # Concatenate the data slices belonging to the chosen sequences.
            return np.concatenate([data[lo:hi] for lo, hi in ranges], axis=0)

        return assemble(train_ranges), assemble(cv_ranges)
| mit |
niazangels/CADL | session-5/libs/utils.py | 4 | 21027 | """Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
def download(path):
    """Use urllib to download a file.

    Parameters
    ----------
    path : str
        Url to download

    Returns
    -------
    path : str
        Location of downloaded file.
    """
    import os
    from six.moves import urllib

    # Save into the current directory under the url's basename; an
    # existing file of the same name is reused, not re-downloaded.
    fname = path.split('/')[-1]
    if os.path.exists(fname):
        return fname

    print('Downloading ' + path)

    def progress(count, block_size, total_size):
        # urlretrieve reporthook: print progress every 20 blocks,
        # rewriting the same console line (end='\r').
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')

    filepath, _ = urllib.request.urlretrieve(
        path, filename=fname, reporthook=progress)
    return filepath
def download_and_extract_tar(path, dst):
    """Download and extract a gzipped tar file.

    Parameters
    ----------
    path : str
        Url of the .tar.gz file to download.
    dst : str
        Directory to extract the archive contents into (created if missing).
    """
    import tarfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager closes the archive's file handle even if extraction
    # raises (the original left the open tarfile object leaked).
    with tarfile.open(filepath, 'r:gz') as archive:
        # NOTE(review): extractall trusts member paths — only safe for
        # archives from trusted sources (path-traversal risk).
        archive.extractall(dst)
def download_and_extract_zip(path, dst):
    """Download and extract a zip file.

    Parameters
    ----------
    path : str
        Url of the zip file to download.
    dst : str
        Directory to extract the archive contents into (created if missing).
    """
    import zipfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager closes the zip's file handle even if extraction
    # raises (the original left the ZipFile object open).
    with zipfile.ZipFile(file=filepath) as zf:
        zf.extractall(dst)
def load_audio(filename, b_normalize=True):
    """Load the audiofile at the provided filename using scipy.io.wavfile.

    Optionally normalizes the audio by its peak magnitude and removes the
    DC offset (mean).

    Parameters
    ----------
    filename : str
        File to load.
    b_normalize : bool, optional
        Normalize to the maximum value and subtract the mean.

    Returns
    -------
    s : np.ndarray
        float32 samples when normalized, otherwise the raw samples as read.
    """
    sr, s = wavfile.read(filename)
    if b_normalize:
        s = s.astype(np.float32)
        peak = np.max(np.abs(s))
        # Guard against an all-zero (silent) file: the original divided by
        # zero here, filling the result with NaNs.
        if peak > 0:
            s = s / peak
        s -= np.mean(s)
    return s
def corrupt(x):
    """Take an input tensor and add uniform masking.

    Parameters
    ----------
    x : Tensor/Placeholder
        Input to corrupt.

    Returns
    -------
    x_corrupted : Tensor
        50 pct of values corrupted.
    """
    # Multiply by a random integer mask drawn uniformly from {0, 1}
    # (each with ~50% probability), zeroing roughly half of the values.
    return tf.multiply(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
                                                    minval=0,
                                                    maxval=2,
                                                    dtype=tf.int32), tf.float32))
def interp(l, r, n_samples):
    """Linearly interpolate from *l* to *r* (inclusive) over n_samples steps.

    Parameters
    ----------
    l : np.ndarray
        Left edge
    r : np.ndarray
        Right edge
    n_samples : int
        Number of samples (must be >= 2).

    Returns
    -------
    arr : np.ndarray
        Stacked interpolated samples; arr[0] == l and arr[-1] == r.
    """
    span = r - l
    fractions = [step / (n_samples - 1) for step in range(n_samples)]
    return np.array([l + t * span for t in fractions])
def make_latent_manifold(corners, n_samples):
    """Create a 2d manifold out of the provided corners: n_samples * n_samples.

    Parameters
    ----------
    corners : list of np.ndarray
        The four corners to interpolate.
    n_samples : int
        Number of samples to use in interpolation (per side).

    Returns
    -------
    arr : np.ndarray
        Stacked array of all 2D interpolated samples, row by row.
    """
    def lerp(a, b):
        # inclusive linear interpolation from a to b in n_samples steps
        return np.array([a + i / (n_samples - 1) * (b - a)
                         for i in range(n_samples)])

    left_edge = lerp(corners[0], corners[1])
    right_edge = lerp(corners[2], corners[3])
    # Each manifold row interpolates between the matching edge points.
    return np.vstack([lerp(left_edge[i], right_edge[i])
                      for i in range(n_samples)])
def imcrop_tosquare(img):
    """Center-crop any image to a square along its spatial axes.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d (H x W [x C]).

    Returns
    -------
    crop : np.ndarray
        The central size x size crop, where size = min(H, W).
    """
    side = np.min(img.shape[:2])
    crop = img
    for axis in (0, 1):
        excess = img.shape[axis] - side
        if excess:
            # keep `side` entries starting halfway into the excess
            crop = np.take(crop, excess // 2 + np.arange(side), axis=axis)
    return crop
def slice_montage(montage, img_h, img_w, n_imgs):
    """Slice a montage image back into its n_imgs h x w tiles.

    Performs the opposite of the montage function: takes a grid image
    with 1-pixel borders and recovers the stacked individual images.

    Parameters
    ----------
    montage : np.ndarray
        Montage image to slice.
    img_h : int
        Height of each sliced image.
    img_w : int
        Width of each sliced image.
    n_imgs : int
        Number of images to slice (assumed a perfect-square grid).

    Returns
    -------
    sliced : np.ndarray
        Array of shape (n_imgs, img_h, img_w[, C]).
    """
    side = int(np.sqrt(n_imgs))
    tiles = []
    for row in range(side):
        top = 1 + row * (img_h + 1)
        for col in range(side):
            left = 1 + col * (img_w + 1)
            tiles.append(montage[top:top + img_h, left:left + img_w])
    return np.array(tiles)
def montage(images, saveto='montage.png'):
    """Draw all images as a montage separated by 1 pixel borders.

    Also saves the file to the destination specified by `saveto`.

    Parameters
    ----------
    images : numpy.ndarray
        Input array to create montage of.  Array should be:
        batch x height x width x channels.
    saveto : str
        Location to save the resulting montage image.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # Grid side: smallest square grid that fits all images.
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    # Background (and thus the 1-px borders) is mid-gray (0.5); a 3-channel
    # canvas is allocated for RGB input, 2-D otherwise.
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    else:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                # Paste into the grid cell, offset past the border pixels.
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    # Side effect: the montage is also written to disk via matplotlib.
    plt.imsave(arr=m, fname=saveto)
    return m
def montage_filters(W):
    """Draw every filter of a weight tensor as one grid image with
    1-pixel borders (border value 0.5).

    Parameters
    ----------
    W : np.ndarray
        Filter bank shaped (k_h, k_w, n_input, n_output); all
        n_input * n_output filters are tiled.

    Returns
    -------
    m : numpy.ndarray
        2-D montage image.
    """
    k_h, k_w = W.shape[0], W.shape[1]
    flat = np.reshape(W, [k_h, k_w, 1, W.shape[2] * W.shape[3]])
    n_filters = flat.shape[-1]
    side = int(np.ceil(np.sqrt(n_filters)))
    m = 0.5 * np.ones((k_h * side + side + 1, k_w * side + side + 1))
    for idx in range(n_filters):
        row, col = divmod(idx, side)
        top = 1 + row * (k_h + 1)
        left = 1 + col * (k_w + 1)
        m[top:top + k_h, left:left + k_w] = np.squeeze(flat[:, :, :, idx])
    return m
def get_celeb_files(dst='img_align_celeba', max_images=100):
    """Download the first `max_images` images of the celeb dataset.

    Files will be placed in a directory 'img_align_celeba' if one
    doesn't exist.

    Parameters
    ----------
    dst : str, optional
        Directory to store the downloaded images in.
    max_images : int, optional
        How many images to download and return.

    Returns
    -------
    files : list of strings
        Locations to the first `max_images` images of the celeb net dataset.
    """
    # Create a directory
    if not os.path.exists(dst):
        os.mkdir(dst)
    # Now perform the following max_images times:
    for img_i in range(1, max_images + 1):
        # create a string using the current loop counter
        f = '000%03d.jpg' % img_i
        if not os.path.exists(os.path.join(dst, f)):
            # and get the url with that string appended the end
            url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
            # We'll print this out to the console so we can see how far we've gone
            print(url, end='\r')
            # And now download the url to a location inside our new directory
            urllib.request.urlretrieve(url, os.path.join(dst, f))
    # NOTE(review): os.listdir order is arbitrary, so the [:max_images]
    # slice is not guaranteed to be the numerically first files — confirm
    # whether ordering matters to callers.
    files = [os.path.join(dst, file_i)
             for file_i in os.listdir(dst)
             if '.jpg' in file_i][:max_images]
    return files
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Parameters
    ----------
    max_images : int, optional
        Number of images to load (downloading them first if necessary).

    Returns
    -------
    imgs : list of np.ndarray
        List of the first `max_images` images from the celeb dataset.
    """
    return [plt.imread(f_i) for f_i in get_celeb_files(max_images=max_images)]
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a 1-D Gaussian Kernel sampled over [-3, 3].

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        # Normal pdf.  Fix: the original hard-coded pi as 3.1415, slightly
        # mis-scaling the 1/(stddev*sqrt(2*pi)) normalization constant.
        z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                                (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * np.pi))))
        return z.eval()
def gauss2d(mean, stddev, ksize):
    """Use Tensorflow to compute a 2D Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed 2D Gaussian Kernel using Tensorflow.
    """
    # The 2-D Gaussian is separable: it is the outer product of the
    # 1-D kernel with itself.
    z = gauss(mean, stddev, ksize)
    g = tf.Graph()
    with tf.Session(graph=g):
        z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
        return z_2d.eval()
def convolve(img, kernel):
    """Use Tensorflow to convolve a 4D image with a 4D kernel.

    Parameters
    ----------
    img : np.ndarray
        4-dimensional image shaped N x H x W x C
    kernel : np.ndarray
        4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
        kernel's height and width, the number of input channels, and the
        number of output channels.  Note that C_I should = C.

    Returns
    -------
    result : np.ndarray
        Convolved result.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # Stride 1 in every dimension; 'SAME' keeps the spatial size.
        convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
        res = convolved.eval()
    return res
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.

    Parameters
    ----------
    ksize : int, optional
        Size of kernel.

    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # A Gabor filter is a Gaussian envelope modulated by a sinusoid:
        # build a 2-D sine wave (constant along rows) and window it with
        # the 2-D Gaussian.
        z_2d = gauss2d(0.0, 1.0, ksize)
        ones = tf.ones((1, ksize))
        ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
        ys = tf.reshape(ys, [ksize, 1])
        wave = tf.matmul(ys, ones)
        gabor = tf.multiply(wave, z_2d)
        return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
    """Helper utility to check homework assignment submissions and package them.

    Parameters
    ----------
    filename : str
        Output zip file name
    file_list : tuple of str
        Required file names; a warning is printed for any that is missing.
    optional_file_list : tuple of str, optional
        Additional file names to include when present.
    """
    # check each file exists
    for part_i, file_i in enumerate(file_list):
        if not os.path.exists(file_i):
            print('\nYou are missing the file {}. '.format(file_i) +
                  'It does not look like you have completed Part {}.'.format(
                      part_i + 1))

    # str.endswith requires a tuple; coerce so lists also work (the
    # original raised TypeError when callers passed lists).
    required = tuple(file_list)
    optional = tuple(optional_file_list)

    def zipdir(path, zf):
        for root, dirs, files in os.walk(path):
            for file in files:
                # make sure the files are part of the necessary file list
                if file.endswith(required) or file.endswith(optional):
                    zf.write(os.path.join(root, file))

    # create a zip file with the necessary files; the context manager
    # finalizes the archive even if zipping raises.
    with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir('.', zipf)
    print('Your assignment zip file has been created!')
    print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
        os.path.abspath(filename)))
def normalize(a, s=0.1):
    """Normalize the image range for visualization: center on 0.5 with
    *s* standard deviations of spread, clip to [0, 1], scale to uint8."""
    std = max(a.std(), 1e-4)  # floor avoids division by ~0 on flat inputs
    centered = (a - a.mean()) / std * s + 0.5
    return np.uint8(np.clip(centered, 0, 1) * 255)
# %%
def weight_variable(shape, **kwargs):
    '''Helper function to create a weight variable initialized with
    a normal distribution

    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    # A list shape is stacked so entries may themselves be tensors;
    # set_shape restores the static shape lost by tf.stack.
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
# %%
def bias_variable(shape, **kwargs):
    '''Helper function to create a bias variable.

    NOTE(review): despite the historical docstring claiming a constant
    initializer, this draws from a normal distribution exactly like
    weight_variable; kept as-is to preserve behavior.

    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x, name=None):
    """Binary Cross Entropy measures cross entropy of a binary variable.

    loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Parameters
    ----------
    z : tf.Tensor
        A `Tensor` of the same type and shape as `x` (the predictions).
    x : tf.Tensor
        A `Tensor` of type `float32` or `float64` (the binary targets).

    Returns
    -------
    loss : tf.Tensor
        Elementwise binary cross-entropy (not reduced).
    """
    with tf.variable_scope(name or 'bce'):
        # eps keeps log() finite when z saturates at exactly 0 or 1.
        eps = 1e-12
        return (-(x * tf.log(z + eps) +
                  (1. - x) * tf.log(1. - z + eps)))
def conv2d(x, n_output,
           k_h=5, k_w=5, d_h=2, d_w=2,
           padding='SAME', name='conv2d', reuse=None):
    """Helper for creating a 2d convolution operation.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output : int
        Number of filters.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables in an existing scope of this name.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the convolution (bias added) and the filter weights.
    """
    with tf.variable_scope(name or 'conv2d', reuse=reuse):
        # Filters shaped [k_h, k_w, n_input_channels, n_output],
        # Xavier-initialized.
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, x.get_shape()[-1], n_output],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(
            name='conv',
            input=x,
            filter=W,
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # Per-output-channel bias, zero-initialized.
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=conv,
            bias=b)
    return h, W
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
             k_h=5, k_w=5, d_h=2, d_w=2,
             padding='SAME', name='deconv2d', reuse=None):
    """Deconvolution helper.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output_h : int
        Height of output
    n_output_w : int
        Width of output
    n_output_ch : int
        Number of filters.
    n_input_ch : int, optional
        Number of input channels; inferred from `x` when None.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables in an existing scope of this name.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the transposed convolution (bias added) and its filters.
    """
    with tf.variable_scope(name or 'deconv2d', reuse=reuse):
        # conv2d_transpose filters are [k_h, k_w, out_ch, in_ch] —
        # output channels come BEFORE input channels here.
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d_transpose(
            name='conv_t',
            value=x,
            filter=W,
            output_shape=tf.stack(
                [tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # Static shape information is lost through conv2d_transpose;
        # restore it so downstream layers can read get_shape().
        conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
        b = tf.get_variable(
            name='b',
            shape=[n_output_ch],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(name='h', value=conv, bias=b)
    return h, W
def lrelu(features, leak=0.2):
    """Leaky rectifier: identity for positive inputs, *leak* slope for
    negative ones, computed branch-free as
    0.5*(1+leak)*x + 0.5*(1-leak)*|x|.

    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.

    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * features + neg_coef * abs(features)
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply
    activation : callable, optional
        Nonlinearity applied to the output (e.g. tf.nn.relu); None for
        a purely linear layer.
    reuse : bool or None, optional
        Whether to reuse variables in an existing scope of this name.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of fully connected layer and the weight matrix
    """
    # Collapse any non-2-D input (e.g. conv feature maps) to
    # [batch, features] first.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)
    n_input = x.get_shape().as_list()[1]
    with tf.variable_scope(name or "fc", reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)
        if activation:
            h = activation(h)
    return h, W
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations
    reuse : bool or None, optional
        Accepted for API symmetry with the other helpers; not used here.

    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.

    Raises
    ------
    ValueError
        If the input is not 1-, 2- or 4-dimensional.
    """
    with tf.variable_scope('flatten'):
        dims = x.get_shape().as_list()
        if len(dims) == 4:
            # Collapse H x W x C into a single feature dimension.
            flattened = tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        elif len(dims) == 2 or len(dims) == 1:
            # Already flat.
            flattened = x
        else:
            raise ValueError('Expected n dimensions of 1, 2 or 4.  Found:',
                             len(dims))
        return flattened
def to_tensor(x):
    """Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.

    Performs the opposite of flatten(x).  If the tensor is already 4-D, this
    returns the same as the input, leaving it unchanged.

    Parameters
    ----------
    x : tf.Tensor
        Input 2-D tensor.  If 4-D already, left unchanged.

    Returns
    -------
    x : tf.Tensor
        4-D representation of the input.

    Raises
    ------
    ValueError
        If the tensor is not 2D or already 4D.
    """
    if len(x.get_shape()) == 2:
        n_input = x.get_shape().as_list()[1]
        x_dim = np.sqrt(n_input)
        if x_dim == int(x_dim):
            # Perfect square: treat as a square single-channel image.
            x_dim = int(x_dim)
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 1], name='reshape')
        elif np.sqrt(n_input / 3) == int(np.sqrt(n_input / 3)):
            # Perfect square after dividing by 3: square RGB image.
            x_dim = int(np.sqrt(n_input / 3))
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 3], name='reshape')
        else:
            # Fallback: a 1x1 "image" with n_input channels.
            x_tensor = tf.reshape(
                x, [-1, 1, 1, n_input], name='reshape')
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    return x_tensor
| apache-2.0 |
giorgiop/scikit-learn | sklearn/learning_curve.py | 7 | 15161 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0,
                   error_score='raise'):
    """Learning curve.

    Determines cross-validated training and test scores for different training
    set sizes.

    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.

    Read more in the :ref:`User Guide <learning_curves>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.

    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.

    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators:
    # convert boolean masks to integer index arrays so slicing by
    # train[:n_train_samples] below behaves as expected.
    if cv[0][0].dtype == bool:
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv

    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # partial_fit path: one job per fold, each sweeping all sizes.
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # refit-from-scratch path: one job per (fold, train size) pair.
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True,
            error_score=error_score)
            for train, test in cv for n_train_samples in train_sizes_abs)
        # keep only (train_score, test_score) from each result tuple
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)

    # -> (2, n_ticks, n_folds); out[0] = train scores, out[1] = test scores
    out = np.asarray(out).transpose((2, 1, 0))

    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores, test_scores = [], []
    # Pair each cumulative subset size with only the NEW samples added at
    # that step: np.split cuts `train` at the size boundaries and the last
    # (leftover) chunk is dropped.
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    for n_train_samples, partial_train in partitions:
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        if y_partial_train is None:
            # unsupervised case: no targets to pass along
            estimator.partial_fit(X_partial_train, classes=classes)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes)
        # partial_fit saw only the new chunk, but scoring uses the full
        # cumulative training subset (and the fixed test set).
        train_scores.append(_score(estimator, X_train, y_train, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.
    Determine training and test scores for varying parameter values.
    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
    Read more in the :ref:`User Guide <validation_curve>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    # Resolve the CV splitter, the scorer and the parallel backend up front.
    cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One fit per (fold, parameter value); each result starts with
    # (train_score, test_score, ...).
    out = parallel(delayed(_fit_and_score)(
        estimator, X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv for v in param_range)
    # Keep only the two score columns, then reshape so axis 0 separates
    # train/test, axis 1 the parameter values and axis 2 the folds.
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| bsd-3-clause |
hrishioa/ISci | PS8.py | 1 | 1867 | from random import *
import numpy
import matplotlib.pyplot as plt
import math
def throwDie(times):
    """Simulate `times` rolls of a fair six-sided die and return their mean."""
    rolls = (randint(1, 6) for _ in range(times))
    return sum(rolls) / float(times)
def throwSquareDie(times):
    """Roll a fair die `times` times and return the mean of the squared outcomes."""
    total = 0.0
    for _ in range(times):
        total += randint(1, 6) ** 2
    return total / times
def singleTurn():
    """Return the total of three independent rolls of a fair six-sided die."""
    return sum(randint(1, 6) for _ in range(3))
def plotHist(times):
    """Plot a histogram of `times` three-dice totals next to a matched normal sample."""
    Y = []
    for i in range(0,times):
        Y.append(singleTurn())
    # Draw a normal sample with the empirical mean / standard deviation
    # so the two histograms can be compared visually.
    mean = numpy.mean(Y)
    sd = numpy.std(Y)
    Y2 = numpy.random.normal(mean,sd,times)
    plt.hist(Y,label="singleTurn",bins=10,histtype='stepfilled')
    plt.hist(Y2,alpha=0.6,label="Normal - mean: %f, sd: %f" % (mean,sd),bins=10,histtype='stepfilled')
    plt.legend()
    plt.show()
    print "Mean: %f, SD: %f" % (mean,sd)
def flipIt(N):
    """Biased random walk of N coin flips: +1 on heads, -2 on tails, floored at zero."""
    position = 0
    for _ in range(N):
        step = 1 if randrange(2) else -2
        position = max(0, position + step)
    return position
def flipMany(M, N):
    """Run the flipIt walk M times and return a histogram of final positions (indices 0..N)."""
    histogram = [0] * (N + 1)
    for _ in range(M):
        histogram[flipIt(N)] += 1
    return histogram
def boltzMann(kbT, e):
    """Boltzmann-style weight (1/kbT) * exp(-e/kbT) for energy e at temperature kbT."""
    return math.exp(-e / kbT) / kbT
def histFlipMany(M,N):
    """Histogram M runs of the flipIt walk and overlay a Boltzmann-like curve."""
    Y = numpy.array(flipMany(M,N))
    print len(Y)
    # Expand the histogram counts back into individual samples so numpy can
    # compute the mean / standard deviation directly.
    OrigY = []
    mean = float(0)
    for i in range(0,len(Y)):
        for j in range(0,Y[i]):
            OrigY.append(i)
    mean=numpy.mean(OrigY)
    sd = numpy.std(OrigY)
    # Standardized samples; currently unused.
    YNorm = (OrigY-mean)/sd
    print "Mean: %f" % mean
    # Boltzmann curve with kbT set to the sample mean, evaluated at x+1
    # (the +1 avoids the singular weight at zero energy).
    X=numpy.linspace(0,N,N)
    Y3 = boltzMann(mean,X+1)
    plt.plot(X,Y3,label="Boltzmann")
    plt.hist(OrigY,alpha=0.5,normed=1,label="Steps")
    plt.legend()
    plt.show()
def main():
    """Entry point: exercise the dice helpers, then show the flip histogram."""
    print "Program Running..."
    # Sanity check: the mean of squares should exceed the square of the mean.
    print "ThrowDie: %f,ThrowSquareDie: %f, ThrowDieSquare: %f" % (throwDie(100),throwSquareDie(100),throwDie(100)**2);
    #plotHist(1000)
    histFlipMany(1000,100)
if __name__ == "__main__":
main() | gpl-2.0 |
PrincessMadMath/LOG8415-Advanced_Cloud | TP1/Sources/plot_CPU.py | 1 | 1183 | import matplotlib.pyplot as pyplot
import numpy
# inspired by http://people.duke.edu/~ccc14/pcfb/numpympl/MatplotlibBarPlots.html
# Labels for the benchmarked cloud instance types (shared x axis of both panels).
xTickMarks = ["azure A1", "azure A4", "amazon T2", "amazon C4", "amazon M4", "amazon R4"]
N = 6
# sysbench CPU results: total run time (s) and average time per request (ms),
# one value per instance type, same order as xTickMarks.
CPU_total_time = [66.8626, 66.6122, 29.8535, 25.0010, 29.3211, 27.8841]
CPU_avg_request = [6.69, 6.66, 2.986, 2.50, 2.93, 2.79]
ind = numpy.arange(N)
width = 0.35
fig = pyplot.figure()
fig.suptitle("sysbench CPU benchmark")
# Left panel: total benchmark time per instance type.
ax = fig.add_subplot(121) # subplot(nbcol, nbligne, numfigure)
ax.bar(ind, CPU_total_time, width)
ax.set_xlim(-width,len(ind)+width)
ax.set_ylim(min(CPU_total_time)-1, max(CPU_total_time)+1)
ax.set_xticks(ind)
ax.set_ylabel("total time (s)")
xtickNames = ax.set_xticklabels(xTickMarks)
pyplot.setp(xtickNames, rotation=45, fontsize=10)
# Right panel: average latency per request.
ax2 = fig.add_subplot(122)
ax2.bar(ind, CPU_avg_request, width)
ax2.set_xlim(-width,len(ind)+width)
ax2.set_ylim(min(CPU_avg_request)-1, max(CPU_avg_request)+1)
ax2.set_xticks(ind)
ax2.set_ylabel("avg time per request (ms)")
xtickNames = ax2.set_xticklabels(xTickMarks)
pyplot.setp(xtickNames, rotation=45, fontsize=10)
# Leave headroom for the suptitle, then display interactively.
pyplot.tight_layout()
pyplot.subplots_adjust(top=0.9)
pyplot.show()
| mit |
nicholasmalaya/paleologos | disputatio/routines/collective_duane.py | 2 | 1665 | #!/bin/py
#
#
#
import sys
import numpy as np
# no twist
# Measured flux values scaled to power (kW); one value per inner blade angle.
flux = 1.35* np.array([0.99,1.02,1.1,1.12,1.12,1.11,1.112,1.123,1.1159,1.101,1.042])
it = [35,40,43,45,47,50,53,55,57,60,65]
#
# duane (top)
#
fluxd = 1.45*np.array([0.99,1.02,1.06,1.1,1.09,1.11,1.117,1.12,1.115,1.10,1.04])
itd = [35,40,43,45,47,50,53,55,57,60,65]
#
# twist
#
flux2 = 1.6*np.array([1.13,1.25,1.32,1.33,1.34,1.33,1.31,1.25,1.11,1.125,1.15])
it2 = [35,40,43,45,47,50,53,55,57,60,65]
# NOTE: the frozen-with-twist series has only 9 points, so its angle list is shorter.
flux2d = 1.7*np.array([1.02,1.2,1.321,1.335,1.335,1.325,1.29,1.22,1.11])
it2d = [35,40,43,45,47,50,53,55,57]
print np.max(flux2)
print np.max(flux)
#
# iteration number (should be same as flux, above)
#
#it = range(len(flux))
#
# now, plot it and make it look good while you are at it
#
import matplotlib.pyplot as plt
fsz=24
#plt.subplot(1, 1, 1)
fig,ax = plt.subplots()
# Four series: CFD vs frozen rotor, each with and without blade twist.
plt.plot(it,flux, 'kd--', color='blue',label='CFD with no Twist')
plt.plot(itd,fluxd, 'kd-', color='blue',label='Frozen with no Twist')
plt.plot(it2,flux2, 'ko--', color='red',label='CFD with Twist')
plt.plot(it2d,flux2d, 'ko-', color='red',label='Frozen with Twist')
#plt.axvline(x=55,linewidth=2, color = 'blue')
#plt.axvline(x=47, ymin=0.5, ymax = 1.1, linewidth=2, color = 'red')
plt.xlim(30, 80)
plt.ylim(1.2, 2.5)
plt.ylabel(r'Turbine Power Extracted (kW)',fontsize=fsz)
plt.xlabel(r'Inner Blade Angle (degrees)',fontsize=fsz)
#ax.set_yscale('log')
plt.legend(loc='best')
#ax.xaxis.set_major_formatter(plt.NullFormatter())
#
# save figure
#
plt.savefig('collective.png')
#
# MARK IT ZERO
#
print 'exiting successfully...'
sys.exit(0)
#
# invoked from command line as:
#
# python plot_flux.py
#
#
# nick
# 2/26/15
#
| mit |
gammasky/cta-dc | sky_model_checks/plot_logN_logS.py | 2 | 19616 | import numpy as np
from astropy.table import Table, vstack
from gammapy.catalog import SourceCatalogGammaCat, SourceCatalogHGPS
import matplotlib.pyplot as plt
import os
# from gammapy.spectrum import CrabSpectrum
from astropy import units as u
from astropy.table import Column
#from scipy import optimize
from scipy.stats import norm
from gammapy.spectrum.models import LogParabola
def define_flux_crab_above_energy(emin=1 * u.TeV, emax=10 * u.TeV):
    """Integral Crab flux between ``emin`` and ``emax``.

    Generalized from the previously hard-coded 1-10 TeV band; the defaults
    keep the old behavior, so existing no-argument callers are unaffected.

    Args:
        emin: lower integration bound (astropy energy quantity)
        emax: upper integration bound (astropy energy quantity)

    Returns:
        Integral flux as an astropy quantity converted to ``cm-2 s-1``.
    """
    # MAGIC Crab spectrum: log-parabola fit (amplitude at 1 TeV).
    crab_magic = LogParabola(amplitude=3.23e-11 * u.Unit('cm-2 s-1 TeV-1'),
                             reference=1 * u.TeV, alpha=2.47, beta=-0.24)
    crab_flux = crab_magic.integral(emin=emin, emax=emax)
    # Printed for inspection, as before, prior to the unit conversion.
    print(crab_flux)
    return crab_flux.to('cm-2 s-1')
def _remove_sources(table, source_names):
    """Drop the first row matching each name in *source_names* from *table* (in place)."""
    for name in source_names:
        idx = np.where(table['Source_Name'] == name)[0]
        table.remove_row(int(idx[0]))


# Unidentified HGPS sources excluded from the extended PWN sample
# (previously removed one-by-one in 21 copy-pasted stanzas).
_EXCLUDED_UNID_SOURCES = [
    'HESS J1852-000', 'HESS J1457-593', 'HESS J1503-582', 'HESS J1646-458',
    'HESS J1641-463', 'HESS J1729-345', 'HESS J1747-248', 'HESS J1800-240',
    'HESS J1808-204', 'HESS J1832-085', 'HESS J1832-093', 'HESS J1848-018',
    'HESS J1923+141', 'HESS J1943+213', 'HESS J1702-420', 'HESS J1844-030',
    'HESS J1746-285', 'HESS J1745-303', 'HESS J1741-302', 'HESS J1745-290',
    'HESS J1626-490',
]


def prepare_the_pwn_table():
    """Assemble the observed PWN sample from gamma-cat and the HGPS catalog.

    Stacks the HGPS 'PWN' sources, the 'Composite' sources (minus
    HESS J1714-385) and a curated subset of 'Unid' sources into one table and
    adds two columns: ``flux_above_1TeV_cu`` (integral flux >1 TeV in crab
    units, percent) and ``sigma`` (half the angular size).

    Requires the ``HGPS`` environment variable to point at the HGPS catalog
    file.  Returns the stacked astropy Table.
    """
    # gamma-cat PWNe are only printed for inspection; the sample below is
    # built purely from the HGPS catalog.
    gammacat = SourceCatalogGammaCat()
    cat_pwn = gammacat.table[gammacat.table['classes'] == 'pwn']
    cat_pwn.pprint()
    cat_pwn = cat_pwn[np.isfinite(cat_pwn['spec_flux_above_1TeV'])]
    hgps_table = SourceCatalogHGPS(os.environ['HGPS']).table
    hgpscat_pwn = hgps_table[hgps_table['Source_Class'] == 'PWN']
    print('----------------- PWN -----------------------')
    print(hgpscat_pwn['Source_Name', 'Flux_Spec_Int_1TeV'])
    print('num_pwn = ', len(hgpscat_pwn))
    hgpscat_composite = hgps_table[hgps_table['Source_Class'] == 'Composite']
    # HESS J1714-385 is not treated as a PWN candidate.
    _remove_sources(hgpscat_composite, ['HESS J1714-385'])
    print('----------------- composite -----------------------')
    print(hgpscat_composite['Source_Name', 'Flux_Spec_Int_1TeV'])
    print('num composite = ', len(hgpscat_composite))
    hgpscat_unid = hgps_table[hgps_table['Source_Class'] == 'Unid']
    _remove_sources(hgpscat_unid, _EXCLUDED_UNID_SOURCES)
    print('num_unid = ', len(hgpscat_unid))
    hgpscat_pwn_extended = vstack([hgpscat_pwn, hgpscat_unid, hgpscat_composite])
    print('----------------- extended -----------------------')
    print(hgpscat_pwn_extended['Source_Name', 'Flux_Spec_Int_1TeV'])
    print('num_tot = ', len(hgpscat_pwn_extended))
    crab_flux_above_1TeV = define_flux_crab_above_energy()
    print('crab_flux_above_1TeV ', crab_flux_above_1TeV)
    # Per-source integral flux in crab units (percent) and angular radius.
    flux_above_1TeV_cu = []
    sigma = []
    for source in hgpscat_pwn_extended:
        flux = source['Flux_Spec_Int_1TeV'] * u.Unit('cm-2 s-1')
        flux_above_1TeV_cu.append((flux / crab_flux_above_1TeV).to('%'))
        sigma.append(source['Size'] / 2.0)
    hgpscat_pwn_extended['flux_above_1TeV_cu'] = Column(
        u.Quantity(flux_above_1TeV_cu),
        description='Integral flux above 1 TeV in crab units')
    hgpscat_pwn_extended['sigma'] = Column(
        u.Quantity(sigma), description='radius of angular extension')
    # Count sources per flux band (crab units, strict bounds on both ends).
    # BUGFIX: the original incremented the 6-8 counter in the 4-6 band, so
    # 'how_many_4to6' always printed 0 and 'how_many_6to8' double-counted.
    bands = [
        ('how_many_above_10', 10, np.inf),
        ('how_many_8to10', 8, 10),
        ('how_many_6to8', 6, 8),
        ('how_many_4to6', 4, 6),
        ('how_many_2to4', 2, 4),
        ('how_many_1to2', 1, 2),
        ('how_many_below1', -np.inf, 1),
    ]
    counts = {name: 0 for name, _, _ in bands}
    for flux_cu in hgpscat_pwn_extended['flux_above_1TeV_cu']:
        for name, lower, upper in bands:
            if lower < flux_cu < upper:
                counts[name] += 1
                break
    for name, _, _ in bands:
        print('{}: '.format(name), counts[name])
    return hgpscat_pwn_extended
def plot_logN_logS(table_pwn, new_table_sim_pwn, merged_table, flux_min, flux_max):
    """Plot the cumulative logN-logS curves (HGPS, simulated, merged) to logNlogS.png.

    All three tables must provide an ``int_flux_above_1TeV_cu`` column (flux
    in crab units); ``flux_min``/``flux_max`` bound the logarithmic binning.
    """
    print('--------------------------------------- table ready now plotting ----------------------------')
    # print(table_pwn['Source_Name', 'Flux_Spec_Int_1TeV'])
    num_bins = 100
    bins = np.logspace(np.log10(flux_min), np.log10(flux_max), num_bins)
    hist, bin_edges = np.histogram(table_pwn['int_flux_above_1TeV_cu'], bins=bins)
    hist_sim, bin_edges_sim = np.histogram(new_table_sim_pwn['int_flux_above_1TeV_cu'], bins=bins)
    hist_all, bin_edges_sim_all = np.histogram(merged_table['int_flux_above_1TeV_cu'], bins=bins)
    # Cumulative counts from the bright end down; 0.01 is prepended so the
    # curve stays positive on the log axis.
    y = np.insert(hist[::-1].astype('float64').cumsum(), 0, 0.01)
    y_err = np.sqrt(y)
    print(' lenght hgps array: ', len(hist), hist.sum())
    # print(y)
    y_sim = np.insert(hist_sim[::-1].astype('float64').cumsum(), 0, 0.01)
    y_err_sim = np.sqrt(y)
    print(' lenght sim array: ', len(hist_sim), hist_sim.sum())
    # print(y_sim)
    y_merged = np.insert(hist_all[::-1].astype('float64').cumsum(), 0, 0.01)
    y_err_merged = np.sqrt(y)
    print(' lenght merged array: ', len(hist_all), hist_all.sum())
    #for i in range(1, num_bins):
    # print(bin_edges_sim[num_bins-i],'-',bin_edges_sim[num_bins-i-1],y[i],y_sim[i], y_merged[i]
    # for i in range(1, num_bins):
    # # print(bin_edges_hgps[num_bins - i], ' ',(y_hgps[i]))
    # print(np.log10(bin_edges_hgps[num_bins - i]), ' ',
    # np.log10(y_hgps[i])) # , ' ', bin_edges_hgps[num_bins - i], ' ',(y_hgps[i]))
    # logx = np.log10(bin_edges_hgps[::-1])
    ## logy = np.log10(y_hgps)
    # logyerr = y_hgps_err / y_hgps
    #p0 = 2.5
    #p1 = 1.24
    # Hand-tuned power-law reference line: log10(N) = p0 - p1 * log10(F),
    # drawn between 0.2 and 53 crab units.
    p0 = 2.4
    p1 = 1.12
    x0 = np.log10(53)
    x1 = np.log10(10)
    x2 = np.log10(0.2)
    y1 = p0 - x1 * p1
    y0 = p0 - x0 * p1
    y2 = p0 - x2 * p1
    plt.figure()
    # bins[::-1] matches the bright-to-faint orientation of the cumsum arrays.
    plt.step(bins[::-1], y, color='r',lw=2)
    plt.step(bins[::-1], y_sim, color='black',lw=1)
    plt.step(bins[::-1], y_merged, color='b',lw=2)
    plt.loglog()
    plt.ylim(0.8, 5000)
    plt.xlim(0.05, 100)
    plt.ylabel('N (F>1 TeV)')
    plt.xlabel('F>1 TeV [cu]')
    # Vertical guides at 10 cu and 0.2 cu, plus the reference power law.
    plt.plot([10, 10], [flux_min, 100], 'k-', lw=1, color='black')
    plt.plot([0.2, 0.2], [flux_min, 5000], 'k--', lw=1, color='black')
    plt.plot([0.2, 53], [np.power(10, y2), np.power(10, y0)], 'k--', lw=2, color='b')
    plt.savefig('logNlogS.png')
def remove_bright_pwn(table_sim_pwn):
    """Remove a per-band quota of the brightest simulated PWNe (in place).

    The simulated population over-predicts bright sources compared to the
    HGPS sample, so a fixed number of rows per flux band (column
    ``int_flux_above_1TeV_cu``, crab units) is dropped.  Rows are deleted by
    ``source_name``, which is assumed to follow the ``pwn_<row>`` convention.

    Args:
        table_sim_pwn: astropy Table of simulated PWNe; modified in place.

    Returns:
        Tuple of (thinned table, reduced-column view used by the plot helpers).
    """
    # Flux bands in crab units: (lower, upper, printed label, removal quota);
    # comparisons are strict on both ends, quota None means keep every source.
    # NOTE: the '6tp8' label typo is kept so the printed output is unchanged.
    bands = [
        (10.0, np.inf, '>10', 20),
        (8.0, 10.0, '8to10', 3),
        (6.0, 8.0, '6tp8', 3),
        (4.0, 6.0, '4to6', 1),
        (2.0, 4.0, '2to4', 6),
        (1.0, 2.0, '1to2', 9),
        (0.1, 1.0, '01to1', None),
        (0.01, 0.1, '001to01', None),
    ]
    counts = {label: 0 for _, _, label, _ in bands}
    removed = {label: 0 for _, _, label, _ in bands}
    remove_idx = []
    # NOTE(review): row 0 is skipped here, preserved from the original
    # implementation -- confirm this is intentional.
    for row in range(1, len(table_sim_pwn)):
        flux_cu = table_sim_pwn[row]['int_flux_above_1TeV_cu']
        for lower, upper, label, quota in bands:
            if lower < flux_cu < upper:
                counts[label] += 1
                if quota is not None and removed[label] < quota:
                    remove_idx.append(row)
                    removed[label] += 1
                break
    print('how many bright srcs: ', len(remove_idx))
    for _, _, label, _ in bands:
        print('how many {}'.format(label), counts[label])
    print(remove_idx)
    # Delete the selected rows by name so earlier removals cannot shift the
    # indices of later ones.
    for row_index in remove_idx:
        name = 'pwn_{}'.format(row_index)
        match = np.where(table_sim_pwn['source_name'] == name)[0]
        table_sim_pwn.remove_row(int(match[0]))
    table_sim_pwn_reduced = table_sim_pwn['source_name', 'spec_norm', 'spec_norm_cu',
                                          'int_flux_above_1TeV', 'int_flux_above_1TeV_cu', 'sigma']
    return table_sim_pwn, table_sim_pwn_reduced
def plot_size_distrib(table_pwn, table_sim, merged_table):
    """Plot normalized angular-size (``sigma``) distributions of the three samples to size.png."""
    bins_size = np.linspace(0, 1.0, num=100)
    bin_center = (bins_size[:-1] + bins_size[1:]) / 2
    #for row in range(1, len(table_pwn) ):
    # if (table_pwn[row]['sigma'] > 0.1):
    # print('sigma:', row, table_pwn[row]['sigma'], table_pwn[row]['source_name'])
    # Drop rows with NaN/inf sizes before histogramming (HGPS and merged
    # tables may carry missing sizes; the simulated table is assumed clean).
    mask_nan_hgps = np.isfinite(table_pwn['sigma'])
    # np.isnan
    table_pwn_size = table_pwn[mask_nan_hgps]
    mask_nan_merged = np.isfinite(merged_table['sigma'])
    # np.isnan
    merged_table_size = merged_table[mask_nan_merged]
    hist_size_hgps, bin_edges_hgps_size = np.histogram(table_pwn_size['sigma'], bins=bins_size, normed=True)
    hist_size_sim, bin_edges_sim_size = np.histogram(table_sim['sigma'], bins=bins_size, normed=True)
    hist_size_merged, bin_edges_merged_size = np.histogram(merged_table_size['sigma'], bins=bins_size, normed=True)
    # print(hist_size_hgps)
    #for i in range(0, len(hist_size_sim) ):
    # print(i , ' tt ', hist_size_sim[i], ' ', bin_center[i], bins_size[i])
    plt.figure()
    # plt.plot(bin_center,hist_hgps_size, 'ro') #where='mid'
    # plt.bar(bin_center, hist_hgps_size, align='center')
    # Default color = HGPS, blue = simulated, red = merged sample.
    plt.step(bin_center, hist_size_hgps, where='post')
    plt.step(bin_center, hist_size_sim, where='post', color='b')
    plt.step(bin_center, hist_size_merged, where='post', color='r')
    print('histograms')
    #rv = norm()
    #mean, sigma = norm.fit(table_pwn_reduced['sigma'], loc=0.2)
    #x = np.linspace(0, 1, 100)
    #y = norm.pdf(x, mean, sigma)
    #plt.plot(x, y)
    #print(mean, sigma)
    plt.savefig('size.png')
if __name__ == '__main__':
    # Observed sample: HGPS PWN/composite/curated-unid selection with
    # crab-unit fluxes and angular sizes.
    table_pwn = prepare_the_pwn_table()
    print('-----------------------------------------------------')
    table_pwn_reduced = table_pwn['Source_Name', 'Flux_Spec_Int_1TeV', 'flux_above_1TeV_cu', 'sigma']
    # Align column names with those used by the simulated table.
    table_pwn_reduced.rename_column('flux_above_1TeV_cu', 'int_flux_above_1TeV_cu')
    table_pwn_reduced.rename_column('Flux_Spec_Int_1TeV', 'int_flux_above_1TeV')
    table_pwn_reduced.rename_column('Source_Name', 'source_name')
    #table_pwn_reduced.pprint()
    # table_pwn_reduced.rename_column('Size', 'sigma')
    print('-----------------------------------------------------')
    # Simulated sample: read, thin out the over-bright sources, then merge
    # with the observed sample and produce both diagnostic plots.
    table_sim_pwn = Table.read('ctadc_skymodel_gps_sources_pwn.ecsv', format='ascii.ecsv')
    new_table_sim_pwn, new_table_sim_pwn_reduced = remove_bright_pwn(table_sim_pwn)
    merged_table = vstack([table_pwn_reduced, new_table_sim_pwn_reduced])
    #merged_table.pprint()
    plot_logN_logS(table_pwn_reduced, new_table_sim_pwn_reduced, merged_table, flux_min=0.07, flux_max=100)
    plot_size_distrib(table_pwn=table_pwn_reduced, table_sim=new_table_sim_pwn_reduced, merged_table=merged_table)
annoviko/pyclustering | pyclustering/cluster/tests/integration/it_somsc.py | 1 | 4743 | """!
@brief Integration-tests for SOM-SC algorithm.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.somsc_templates import SyncnetTestTemplates
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.core.tests import remove_library
class SomscIntegrationTest(unittest.TestCase):
    """Integration tests for SOM-SC clustering run through the C-core (ccore) backend."""
    # Cluster allocation on the SIMPLE sample collection: expected cluster
    # sizes are given explicitly, or None when only the count is checked.
    def testClusterAllocationSampleSimple1ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], True)
    def testClusterOneAllocationSampleSimple1ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [10], True)
    def testClusterAllocationSampleSimple2ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [10, 5, 8], True)
    def testClusterOneAllocationSampleSimple2ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, [23], True)
    def testClusterAllocationSampleSimple3ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [10, 10, 10, 30], True)
    def testClusterOneAllocationSampleSimple3ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, [60], True)
    def testClusterAllocationSampleSimple4ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, [15, 15, 15, 15, 15], True)
    def testClusterOneAllocationSampleSimple4ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, [75], True)
    def testClusterAllocationSampleSimple5ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 4, [15, 15, 15, 15], True)
    def testClusterOneAllocationSampleSimple5ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, [60], True)
    def testClusterOneDimensionSampleSimple7ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, [10, 10], True)
    def testClusterOneDimensionSampleSimple8ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 4, None, True)
    # Requesting more clusters than the data supports should still succeed.
    def testWrongNumberOfCentersSimpleSample1ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 3, None, True)
    def testWrongNumberOfCentersSimpleSample2ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 4, None, True)
    # Degenerate data sets with repeated identical points.
    def testClusterTheSameData1ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, [10, 20], True)
    def testClusterTheSameData2ByCore(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, [5, 5, 5], True)
    def testClusterAllocationOneDimensionDataByCore(self):
        SyncnetTestTemplates().templateClusterAllocationOneDimensionData(True)
    # Prediction of cluster membership for previously unseen points.
    def test_predict_one_point_ccore(self):
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[0.3, 0.2]], [0], True)
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[4.1, 1.1]], [1], True)
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 1.9]], [2], True)
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 4.1]], [3], True)
    def test_predict_two_points_ccore(self):
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[0.3, 0.2], [2.1, 1.9]], [0, 2], True)
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 4.1], [2.1, 1.9]], [3, 2], True)
    def test_predict_four_points_ccore(self):
        to_predict = [[0.3, 0.2], [4.1, 1.1], [2.1, 1.9], [2.1, 4.1]]
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, to_predict, [0, 1, 2, 3], True)
    def test_predict_five_points_ccore(self):
        to_predict = [[0.3, 0.2], [4.1, 1.1], [3.9, 1.1], [2.1, 1.9], [2.1, 4.1]]
        SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, to_predict, [0, 1, 1, 2, 3], True)
    # Processing must fall back gracefully when the C-core library is absent.
    def testProcessingWhenLibraryCoreRemoved(self):
        self.runRemovedLibraryCoreTest()
    @remove_library
    def runRemovedLibraryCoreTest(self):
        SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], True)
| gpl-3.0 |
ssokota/tiny-hanabi | scripts/interface.py | 1 | 6551 | """Interface for running experiments.
"""
import argparse
from pathlib import Path
from typing import Optional
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from tiny_hanabi.agent.assemblers import construct_learner, learnertype
from tiny_hanabi.agent.single_agent_learners import Algorithms
from tiny_hanabi.agent.multi_agent_learners import Sads
from tiny_hanabi.game.settings import Game, Settings
from tiny_hanabi.game.payoff_matrices import GameNames
from tiny_hanabi.game.assemblers import get_game
from tiny_hanabi.trainer.run import run
Path("results").mkdir(exist_ok=True)
Path("results/data").mkdir(exist_ok=True)
Path("results/figures").mkdir(exist_ok=True)
def save_and_plot(
    expected_returns: list,
    game: Game,
    learner: learnertype,
    gamename: GameNames,
    setting: Settings,
    algorithm: Algorithms,
    pg_init_lr: Optional[float],
    ql_init_lr: Optional[float],
    ql_init_lr2: Optional[float],
    init_epsilon: Optional[float],
    avd: bool,
    central_critic: bool,
    sad: Optional[Sads],
    num_episodes: int,
    num_evals: int,
    fn: str,
    plot: bool,
) -> None:
    """Save evaluation data to ``results/data`` and optionally plot it.
    Args:
        expected_returns: List of expected returns, one per evaluation
        game: Game object (supplies the optimal return for reference)
        learner: Learner whose ``eval_schedule`` gives the episode number of
            each evaluation
        gamename: Name of Tiny Hanabi game
        setting: Name of setting
        algorithm: Name of algorithm
        pg_init_lr: Initial policy-gradient learning rate, is linearly decayed
        ql_init_lr: Initial Q-learning learning rate, is linearly decayed
        ql_init_lr2: Second Q-learning learning rate for HQL, used when
            target < estimate, is linearly decayed
        init_epsilon: Epsilon-greedy exploration rate, is linearly decayed
        avd: Whether additive value decomposition (aka VDN) was used
        central_critic: Whether a centralized value function was used
        sad: Which sad variant was used, if any
        num_episodes: How many episodes the learner trained for
        num_evals: How many evaluations were done
        fn: filename to which to save results
        plot: whether to plot results
    """
    # One row per evaluation; the hyperparameters are repeated on every row
    # so the saved pickle is self-describing.
    df = pd.DataFrame(
        {
            "episode": learner.eval_schedule,
            "expected_return": expected_returns,
            "optimal_return": num_evals * [game.optimal_return],
            "gamename": num_evals * [gamename.value],
            "setting": num_evals * [setting.value],
            "algorithm": num_evals * [algorithm.value],
            "pg_init_lr": num_evals * [pg_init_lr],
            "ql_init_lr": num_evals * [ql_init_lr],
            "ql_init_lr2": num_evals * [ql_init_lr2],
            "init_epsilon": num_evals * [init_epsilon],
            "avd": num_evals * [avd],
            "central_critic": num_evals * [central_critic],
            "sad": num_evals * [sad.value if sad else None],
        }
    )
    df.to_pickle("results/data/" + fn + ".pkl")
    if plot:
        # Horizontal reference line at the game's optimal return.
        plt.axhline(y=game.optimal_return, color="gray", linestyle="-", linewidth=1)
        sns.lineplot(data=df, x="episode", y="expected_return")
        # NOTE: rebinds `algorithm` to a plain string; a central critic is
        # reported as "a2c2" in the figure title.
        algorithm = "a2c2" if central_critic else algorithm.value
        title = f"Game {gamename.value}; {setting.value}; {algorithm};"
        if avd:
            title += " avd;"
        if sad:
            title += f" {sad.value};"
        if pg_init_lr:
            title += f" pglr={pg_init_lr};"
        if ql_init_lr:
            title += f" qllr={ql_init_lr};"
        if ql_init_lr2:
            title += f" qllr2={ql_init_lr2};"
        if init_epsilon:
            title += f" eps={init_epsilon};"
        # Drop the trailing semicolon before setting the title.
        plt.title(title[:-1])
        plt.savefig("results/figures/" + fn + ".pdf")
def interface(
    gamename: GameNames,
    setting: Settings,
    algorithm: Algorithms,
    pg_init_lr: Optional[float] = None,
    ql_init_lr: Optional[float] = None,
    ql_init_lr2: Optional[float] = None,
    init_epsilon: Optional[float] = None,
    avd: bool = False,
    central_critic: bool = False,
    sad: Optional[Sads] = None,
    num_episodes: int = 1_000_000,
    num_evals: int = 100,
    fn: str = "example",
    plot: bool = False,
) -> None:
    """User interface for running experiments

    Builds the game and learner, trains, then saves (and optionally plots)
    the expected returns via ``save_and_plot``.

    Args:
        gamename: Name of Tiny Hanabi game
        setting: Name of setting
        algorithm: Name of algorithm
        pg_init_lr: Initial policy-gradient learning rate, is linearly decayed
        ql_init_lr: Initial Q-learning learning rate, is linearly decayed
        ql_init_lr2: Second Q-learning learning rate for HQL, used when
            target < estimate, is linearly decayed
        init_epsilon: Epsilon-greedy exploration rate, is linearly decayed
        avd: Whether to use additive value decomposition (aka VDN)
        central_critic: Whether to use a centralized value function
        sad: Whether to use a sad variant
        num_episodes: How many episodes to train for
        num_evals: How many evaluations to do
        fn: filename to which to save results
        plot: whether to plot results
    """
    game = get_game(gamename, setting)
    # NOTE: arguments below are positional; their order must match the
    # signatures of construct_learner and save_and_plot exactly.
    learner = construct_learner(
        setting,
        algorithm,
        pg_init_lr,
        ql_init_lr,
        ql_init_lr2,
        init_epsilon,
        avd,
        central_critic,
        sad,
        num_episodes,
        num_evals,
    )
    expected_returns = run(game, learner)
    save_and_plot(
        expected_returns,
        game,
        learner,
        gamename,
        setting,
        algorithm,
        pg_init_lr,
        ql_init_lr,
        ql_init_lr2,
        init_epsilon,
        avd,
        central_critic,
        sad,
        num_episodes,
        num_evals,
        fn,
        plot,
    )
if __name__ == "__main__":
    # Command-line front end: flag names mirror the keyword parameters of
    # ``interface`` so the parsed namespace can be forwarded verbatim.
    parser = argparse.ArgumentParser()
    # Required positionals: which game, setting, and algorithm to run.
    parser.add_argument("gamename", type=GameNames, choices=list(GameNames))
    parser.add_argument("setting", type=Settings, choices=list(Settings))
    parser.add_argument("algorithm", type=Algorithms, choices=list(Algorithms))
    # Optional hyperparameters; None means "use the learner's default".
    parser.add_argument("--pg_init_lr", type=float)
    parser.add_argument("--ql_init_lr", type=float)
    parser.add_argument("--ql_init_lr2", type=float)
    parser.add_argument("--init_epsilon", type=float)
    # Architecture toggles.
    parser.add_argument("--avd", default=False, action="store_true")
    parser.add_argument("--central_critic", default=False, action="store_true")
    parser.add_argument("--sad", default=None, type=Sads, choices=list(Sads))
    # Run length, evaluation count, and output options.
    parser.add_argument("--num_episodes", type=int, default=int(1e6))
    parser.add_argument("--num_evals", type=int, default=100)
    parser.add_argument("--fn", default="example")
    parser.add_argument("--plot", default=False, action="store_true")
    args = parser.parse_args()
    # Flag names match interface()'s keyword parameters one-to-one.
    interface(**vars(args))
| mit |
jorik041/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data: 9 separable points in the plane, 5 in class 1 and 4 in class 2.
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
# Shuffled three-class labels for the same points (not separable by QDA)
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
               [2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
    """QDA classification on the toy problems.

    QDA must recover the labels of the separable data exactly, both for
    the 2-D points and for the degenerate one-feature version, while its
    probability estimates stay consistent with the hard predictions.
    """
    model = qda.QDA()
    # Separable classes: training-set predictions reproduce the labels.
    assert_array_equal(model.fit(X, y).predict(X), y)
    # The degenerate one-feature data set is handled the same way.
    assert_array_equal(model.fit(X1, y).predict(X1), y)
    # Probabilities agree with the hard predictions ...
    probas = model.predict_proba(X1)
    assert_array_equal((probas[:, 1] > 0.5) + 1, y)
    # ... and log-probabilities are consistent with them.
    log_probas = model.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(log_probas), probas, 8)
    # The shuffled three-class labels cannot be separated by QDA.
    assert_true(np.any(model.fit(X, y3).predict(X) != y3))
    # A class with a single member must be rejected at fit time.
    assert_raises(ValueError, model.fit, X, y4)
def test_qda_priors():
    """Skewing the class priors must shift predictions toward class 2."""
    # Baseline: number of points assigned to class 2 with uniform priors.
    baseline_pos = np.sum(qda.QDA().fit(X, y).predict(X) == 2)
    # Give class 1 a vanishing prior; class 2 must now absorb more points.
    eps = 1e-10
    skewed = qda.QDA(priors=np.array([eps, 1 - eps]))
    skewed_pos = np.sum(skewed.fit(X, y).predict(X) == 2)
    assert_greater(skewed_pos, baseline_pos)
def test_qda_store_covariances():
    """covariances_ is only exposed when storage is explicitly requested."""
    # By default the fitted estimator keeps no covariance matrices.
    est = qda.QDA().fit(X, y)
    assert_true(not hasattr(est, 'covariances_'))
    # With store_covariances=True the per-class covariances are available
    # and match the known values for the toy data.
    est = qda.QDA().fit(X, y, store_covariances=True)
    assert_true(hasattr(est, 'covariances_'))
    expected_cov0 = np.array([[0.7, 0.45], [0.45, 0.7]])
    assert_array_almost_equal(est.covariances_[0], expected_cov0)
    expected_cov1 = np.array([[0.33333333, -0.33333333],
                              [-0.33333333, 0.66666667]])
    assert_array_almost_equal(est.covariances_[1], expected_cov1)
def test_qda_regularization():
    """reg_param guards against zero-variance features and tiny classes."""
    # With the default reg_param=0. the constant second feature breaks QDA.
    plain = qda.QDA()
    with ignore_warnings():
        broken_pred = plain.fit(X2, y).predict(X2)
    assert_true(np.any(broken_pred != y))
    # A small ridge on the covariances restores correct predictions.
    ridged = qda.QDA(reg_param=0.01)
    with ignore_warnings():
        ridged.fit(X2, y)
    assert_array_equal(ridged.predict(X2), y)
    # It also copes with a class that has fewer samples than features.
    tiny = qda.QDA(reg_param=0.1)
    with ignore_warnings():
        tiny.fit(X5, y5)
    assert_array_equal(tiny.predict(X5), y5)
| bsd-3-clause |
kerkphil/multi-country | Python/7CountryElliptical/AuxiliaryClass.py | 2 | 160723 | from __future__ import division
import csv
import time
import numpy as np
import scipy as sp
import scipy.optimize as opt
import scipy.interpolate as interpol
from matplotlib import pyplot as plt
from pprint import pprint
from mpl_toolkits.mplot3d import Axes3D
import AuxiliaryDemographics as demog
from pylab import savefig
#from pure_cython import cy_fillca
class OLG(object):
"""
This object takes all of the parts of calculating the OG multi-country model
and stores it into a centralized object. Thihas a huge advantage over previous
versions as we are now able to quickly access stored parts when we are trying
to expand the code. Before, we had to carefully pass tuples of parameters
everywhere and it was easy to get lost in the details.
The variables are listed in alphabetical order of their data type,
then alphabetical order of their name, so Arrays are listed first,
Booleans second, etc.
For each function there are the following categories:
Description: Brief description of what the function does
Inputs: Lists the inputs that the function uses
Variables Called From Object: Lists the variables that the function calls
from storage
Variables Stored in Object: Lists the variables that are put into storage
Other Functions Called: Lists the other non-library functions needed
to complete the process of the current function
Objects in Function: Lists the variables that are exclusive to
that function and are not used again.
Outputs: Lists the outputs that the function puts out.
"""
    def __init__(self, countries, HH_Params, Firm_Params, Lever_Params):
        """
        Description:
            - Creates the OLG object and stores all model parameters in it,
              imports the demographic data from .CSV files, and initializes
              labor productivities and the time endowment. This is the
              "foundation" every other method builds on.

        Inputs:
            - countries    = Tuple: (I_dict, I_touse). A dictionary
                             associating each country name with an index
                             number, and the roster of country names used.
            - HH_Params    = Tuple: (S, I, beta_annual, sigma, Dem_Degree).
                             Number of cohorts, number of countries,
                             annualized discount factor, rate of time
                             preference, and degree of the polynomial used
                             to fit the demographic data.
            - Firm_Params  = Tuple: (alpha, delta_annual, chil, chik, mu,
                             g_A). Capital share, annualized depreciation
                             rate, leisure and kids preference parameters,
                             elliptical utility parameter, and growth rate
                             of technology.
            - Lever_Params = Tuple: (PrintAges, CheckerMode, Iterate,
                             UseDiffDemog, UseDiffProductivities,
                             Matrix_Time, ShaveTime, UseCalcDemog,
                             ShowCompGraphs, coeff, STATA, AddTime).
                             Boolean levers controlling printing, iteration
                             output, demographics options, and exports.

        Variables Stored in Object (main ones):
            - self.beta, self.delta = Scalars: discount/depreciation rates
                                      converted from annual values to one
                                      model period (70/S years per period)
            - self.T                = Int: number of time periods (6*S)
            - self.T_1              = Int: transition year for the
                                      demographics, capped at 50
            - self.A                = Array: [I], technology level per
                                      country (staggered when UseDiffDemog)
            - self.e, self.e_ss     = Arrays: [I,S,T+S] and [I,S], labor
                                      productivities for the transition path
                                      and the steady state
            - self.lbar, self.lbar_ss = Array [T+S] / Scalar: time endowment
                                      per period and its steady-state value
            - self.Timepath_counter = Int: iteration counter for the time
                                      path solver
            - Key demographic ages (LeaveHouseAge, FirstFertilityAge,
              LastFertilityAge, MaxImmigrantAge, FirstDyingAge, agestopull)
              from demog.getkeyages.

        Other Functions Called:
            - demog.getkeyages: key ages for the demographic dynamics
            - self.Import_Data: imports the demographic .CSV data
        """

        #PARAMETER SET UP

        #HH Parameters
        (self.S, self.I, beta_annual, self.sigma,self.Dem_Degree) = HH_Params

        # One model period spans 70/S years, so annual rates are compounded
        self.beta=beta_annual**(70/self.S)
        self.T = int(round(6*self.S))
        self.T_1 = self.S
        if self.S > 50:
            self.T_1 = 50

        #Demographics Parameters
        self.I_dict, self.I_touse = countries

        #Firm Parameters
        (self.alpha,delta_annual,self.chil,self.chik,self.mu, self.g_A)=Firm_Params
        self.delta=1-(1-delta_annual)**(70/self.S)

        #Lever Parameters
        (PrintAges,self.CheckerMode,self.Iterate,self.UseDiffDemog,self.UseDiffProductivities\
                ,self.Matrix_Time,self.ShaveTime,self.UseCalcDemog,self.ShowCompGraphs,\
                self.coeff,self.STATA,self.AddTime) = Lever_Params

        #Getting key ages for calculating demographic dynamics
        #if self.S<=80:
        self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge,\
        self.MaxImmigrantAge, self.FirstDyingAge, self.agestopull = \
                demog.getkeyages(self.S,PrintAges)

        if self.UseDiffDemog:
            self.A = np.ones(self.I)+np.cumsum(np.ones(self.I)*.05)-.05
            #Techonological Change, used for when countries are different
        else:
            self.A = np.ones(self.I) #Techonological Change, used for idential countries

        #Initialize Labor Productivities
        if self.UseDiffProductivities:
            # Children still at home and the dying cohorts are less productive
            self.e = np.ones((self.I, self.S, self.T+self.S))
            self.e[:,self.FirstDyingAge:,:] = 0.3
            self.e[:,:self.LeaveHouseAge,:] = 0.3
        else:
            self.e = np.ones((self.I, self.S, self.T+self.S)) #Labor productivities
        self.e_ss=self.e[:,:,-1]

        #Initilize Time Endowment
        # NOTE(review): the cumulative-growth endowment computed on the next
        # line is immediately overwritten with ones by the two assignments
        # that follow, leaving a constant endowment of 1 in every period --
        # confirm whether the growth adjustment was meant to be disabled.
        self.lbar = np.cumsum(np.ones(self.T+self.S)*self.g_A)
        self.lbar[self.T:] = np.ones(self.S)
        self.lbar[:self.T] = np.ones(self.T)
        self.lbar_ss=self.lbar[-1]

        #Imports all of the data from .CSV files needed for the model
        self.Import_Data()

        #Initialize counter that will keep track of the number of iterations
        #the time path solver takes
        self.Timepath_counter = 1
#DEMOGRAPHICS SET-UP
    def Import_Data(self):
        """
        Description:
            - Imports the demographic .CSV files (mortality, fertility,
              population, and net migration), fits polynomial regressions of
              degree self.Dem_Degree to the log-mortality, log-fertility, and
              migration data (optionally adding time powers as regressors
              when self.AddTime is True), and stores both the raw demographic
              matrices and the fitted values on the object. Optionally
              exports the regression design matrices for STATA/R and prints
              the estimated coefficients.

        Variables Called from Object:
            - self.I, self.S, self.T, self.T_1, self.Dem_Degree
            - self.I_dict, self.I_touse, self.UseDiffDemog
            - self.LeaveHouseAge, self.FirstFertilityAge,
              self.LastFertilityAge, self.FirstDyingAge,
              self.MaxImmigrantAge, self.agestopull
            - self.AddTime, self.STATA, self.coeff

        Variables Stored in Object:
            - self.frange              = Int: number of fertile years
            - self.N, self.Nhat        = Arrays [I,S,T]: population levels
                                         and world population shares
                                         (only year 0 is filled here)
            - self.all_FertilityRates  = Array [I,S,frange+T]: fertility
                                         rates starting frange years ago
            - self.FertilityRates      = Array [I,S,T]: fertility rates from
                                         the present onward
            - self.MortalityRates      = Array [I,S,T]: mortality rates
            - self.Migrants            = Array [I,S,T]: immigrant counts
            - self.Mort_Output, self.Fert_Output, self.Migr_Output
                                       = Lists [I]: np.linalg.lstsq output
                                         per country
            - self.Mortality_Test, self.Fertility_Test, self.Migration_Test
                                       = Arrays [I,S,T]: fitted polynomial
                                         values, rescaled to S-period rates

        Outputs:
            - None
        """
        self.frange=self.LastFertilityAge+1-self.FirstFertilityAge

        self.N=np.zeros((self.I,self.S,self.T))
        self.Nhat=np.zeros((self.I,self.S,self.T))
        self.all_FertilityRates = np.zeros((self.I, self.S, self.frange+self.T))
        self.FertilityRates = np.zeros((self.I, self.S, self.T))
        self.MortalityRates = np.zeros((self.I, self.S, self.T))
        self.Migrants = np.zeros((self.I, self.S, self.T))

        # Country names ordered by their index number in I_dict
        I_all = list(sorted(self.I_dict, key=self.I_dict.get))

        #Based on the size of the .CSV files provided by Kotlikoff
        Mort_Temp = np.zeros((self.I,51,23)) #Raw import from .CSV files of Mortality Rates
        Fert_Temp = np.zeros((self.I,99,23)) #Raw import from .CSV files of Fertility Rates

        def Polynomial_Fit(Coefficients,s,t,S):
            """Evaluate the fitted polynomial in normalized age (s/S) and
            time (t/S).

            Coefficients has length 2*Dem_Degree+1: the intercept, then the
            age-power coefficients, then the time-power coefficients.
            Returns the fitted rate as a scalar.
            """
            Length = len(Coefficients)
            Result = Coefficients[0]
            for j in xrange(1,int(((Length-1)/2)+1)):
                Result+=Coefficients[j]*(s/S)**j
                Result+=Coefficients[j+int((Length-1)/2)]*(t/S)**j
            return Result

        def Polynomial_Fit2(Coefficients,s,S):
            """Evaluate the fitted polynomial in normalized age (s/S) only.

            Coefficients has length Dem_Degree+1: the intercept followed by
            the age-power coefficients. Returns the fitted rate as a scalar.
            """
            Length = len(Coefficients)
            Result = Coefficients[0]
            for j in xrange(1,Length):
                Result+=Coefficients[j]*(s/S)**j
            return Result

        if self.AddTime==False:
            # Age-only regressions: one observation per column of the raw
            # data's first row (23 calendar-year columns)
            X = np.ones((self.I,23,self.Dem_Degree+1))
            X_Fert = np.ones((self.I,23,self.Dem_Degree+1))
            X_Migrant = np.ones((self.I,65,self.Dem_Degree+1))
            Y = np.zeros((self.I,23))
            Y_Fert = np.zeros((self.I,23))

            for i in xrange(self.I):
                Mort_Temp[i,:,:]=np.loadtxt(str("Data_Files/"+I_all[i]+\
                        "_mortality_edited.csv"),delimiter=',')
                Fert_Temp[i,:,:]=np.loadtxt(str("Data_Files/"+I_all[i]+\
                        "_fertility_edited.csv")\
                        ,delimiter=',')
                Y[i,:] = Mort_Temp[i,0,:]
                Y_Fert[i,:] = Fert_Temp[i,0,:]
                # Powers of normalized ages as regressors
                for j in xrange(1,self.Dem_Degree+1):
                    X[i,:,j]= (np.arange(68,91)/100.)**j
                    X_Fert[i,:,j]=(np.arange(23,46)/100.)**j
                    X_Migrant[i,:,j]=(np.arange(1,66)/100)**j

            Raw_Migrants = np.transpose(np.loadtxt(("Data_Files/net_migration_edited.csv"),\
                    delimiter=','))*100

            #We're regressing on log mortality and log fertility
            Y2 = np.log(Y)
            Y3 = np.log(Y_Fert)

            #Empty list to store the output of the least squares function.
            #Each country's fits are done individually.
            self.Mort_Output=[]
            self.Fert_Output=[]
            self.Migr_Output=[]

            for i in xrange(self.I):
                #Regression Occurs Here
                self.Mort_Output.append(np.linalg.lstsq(X[i,:,:],Y2[i,:]))
                self.Fert_Output.append(np.linalg.lstsq(X_Fert[i,:,:],Y3[i,:]))
                self.Migr_Output.append(np.linalg.lstsq(X_Migrant[i,:,:],Raw_Migrants[i,:]))

            if self.STATA:
                for i in xrange(self.I):
                    #Concatenates dependent/independent variables and saves as a
                    #.CSV file for a program like STATA/R. Getting all the regression
                    #details is more complicated in python, so this is an easy shortcut.
                    Tot_Mort = np.c_[Y2[i,:],X[i,:,:]]
                    Tot_Fert = np.c_[Y3[i,:],X_Fert[i,:,:]]
                    Tot_Imm = np.c_[Raw_Migrants[i,:],X_Migrant[i,:,:]]
                    fertname="STATA_Files/fertility_reg_country_"+str(i)+".csv"
                    mortname="STATA_Files/mortality_reg_country_"+str(i)+".csv"
                    immname="STATA_Files/immigration_reg_country_"+str(i)+".csv"
                    np.savetxt(immname,Tot_Imm,delimiter=',')
                    np.savetxt(fertname,Tot_Fert,delimiter=',')
                    np.savetxt(mortname,Tot_Mort,delimiter=',')

            self.Mortality_Test=np.zeros((self.I,self.S,self.T))
            self.Fertility_Test=np.zeros((self.I,self.S,self.T))
            self.Migration_Test=np.zeros((self.I,self.S,self.T))

            #Gets the polynomial fit then adjusts them based on the size of S.
            # The exp undoes the log-regression; rates are rescaled from
            # annual to S-period probabilities.
            for i in xrange(self.I):
                for s in xrange(self.S):
                    if s in xrange(self.FirstDyingAge,self.S):
                        MTemp = Polynomial_Fit2(self.Mort_Output[i][0],s,self.S)
                        self.Mortality_Test[i,s,:] = 1-(1-np.exp(MTemp))**(100/self.S)
                    if s in xrange(self.FirstFertilityAge, self.LastFertilityAge+1):
                        FTemp = Polynomial_Fit2(self.Fert_Output[i][0],s,self.S)
                        self.Fertility_Test[i,s,:] = (1+ np.exp(FTemp))**(100/self.S)-1
                    if s in xrange(1,self.MaxImmigrantAge):
                        ITemp = Polynomial_Fit2(self.Migr_Output[i][0],s,self.S)
                        self.Migration_Test[i,s,:]=ITemp
        else:
            # Age-and-time regressions: the observations are all year rows
            # of the raw data stacked, with extra time-power regressors
            X = np.ones((self.I,51*23,2*self.Dem_Degree+1))
            X_Fert = np.ones((self.I,99*23,2*self.Dem_Degree+1))
            X_Migrant = np.ones((self.I,65,self.Dem_Degree+1))
            Y = np.zeros((self.I,23*51))
            Y_Fert = np.zeros((self.I,23*99))

            for i in xrange(self.I):
                Mort_Temp[i,:,:]=np.loadtxt(str("Data_Files/"+I_all[i]+\
                        "_mortality_edited.csv"),delimiter=',')
                Fert_Temp[i,:,:]=np.loadtxt(str("Data_Files/"+I_all[i]+\
                        "_fertility_edited.csv"),delimiter=',')
                for x in xrange(51):
                    Y[i,23*x:23*(x+1)]=Mort_Temp[i,x,:]
                    for j in xrange(1,self.Dem_Degree+1):
                        X[i,23*x:23*(x+1),j]=(np.arange(68,91)/100.)**j
                        X[i,23*x:23*(x+1),j+self.Dem_Degree]=(x/100.)**j
                for y in xrange(99):
                    Y_Fert[i,23*y:23*(y+1)]=Fert_Temp[i,y,:]
                    for j in xrange(1,self.Dem_Degree+1):
                        X_Fert[i,23*y:23*(y+1),j]=(np.arange(23,46)/100.)**j
                        X_Fert[i,23*y:23*(y+1),j+self.Dem_Degree]=((y-48)/100.)**j
                for j in xrange(1,self.Dem_Degree+1):
                    X_Migrant[i,:,j]=(np.arange(1,66)/100)**j

            Raw_Migrants = np.transpose(np.loadtxt(("Data_Files/net_migration_edited.csv"),\
                    delimiter=','))*100

            #We're regressing on log mortality and log fertility
            Y2 = np.log(Y)
            Y3 = np.log(Y_Fert)

            #Empty list to store the output of the least squares function.
            #Each country's fits are done individually.
            self.Mort_Output=[]
            self.Fert_Output=[]
            self.Migr_Output=[]

            for i in xrange(self.I):
                #Regression Occurs Here
                self.Mort_Output.append(np.linalg.lstsq(X[i,:,:],Y2[i,:]))
                self.Fert_Output.append(np.linalg.lstsq(X_Fert[i,:,:],Y3[i,:]))
                self.Migr_Output.append(np.linalg.lstsq(X_Migrant[i,:,:],Raw_Migrants[i,:]))

            if self.STATA:
                for i in xrange(self.I):
                    #Concatenates dependent/independent variables and saves as a
                    #.CSV file for a program like STATA/R. Getting all the regression
                    #details is more complicated in python, so this is an easy shortcut.
                    Tot_Mort = np.c_[Y2[i,:],X[i,:,:]]
                    Tot_Fert = np.c_[Y3[i,:],X_Fert[i,:,:]]
                    Tot_Imm = np.c_[Raw_Migrants[i,:],X_Migrant[i,:,:]]
                    fertname="STATA_Files/fertility_reg_country_"+str(i)+".csv"
                    mortname="STATA_Files/mortality_reg_country_"+str(i)+".csv"
                    immname="STATA_Files/immigration_reg_country_"+str(i)+".csv"
                    np.savetxt(immname,Tot_Imm,delimiter=',')
                    np.savetxt(fertname,Tot_Fert,delimiter=',')
                    np.savetxt(mortname,Tot_Mort,delimiter=',')

            self.Mortality_Test=np.zeros((self.I,self.S,self.T))
            self.Fertility_Test=np.zeros((self.I,self.S,self.T))
            self.Migration_Test=np.zeros((self.I,self.S,self.T))

            #Gets the polynomial fit then adjusts them based on the size of S.
            # NOTE(review): this branch also evaluates with the age-only
            # Polynomial_Fit2, even though the coefficient vectors fitted
            # above have length 2*Dem_Degree+1 (age AND time terms), so
            # Polynomial_Fit2 treats the time coefficients as extra age
            # powers. Confirm whether Polynomial_Fit (with a time argument)
            # was intended here.
            for i in xrange(self.I):
                for s in xrange(self.S):
                    if s in xrange(self.FirstDyingAge,self.S):
                        MTemp = Polynomial_Fit2(self.Mort_Output[i][0],s,self.S)
                        self.Mortality_Test[i,s,:] = 1-(1-np.exp(MTemp))**(100/self.S)
                    if s in xrange(self.FirstFertilityAge, self.LastFertilityAge+1):
                        FTemp = Polynomial_Fit2(self.Fert_Output[i][0],s,self.S)
                        self.Fertility_Test[i,s,:] = (1+ np.exp(FTemp))**(100/self.S)-1
                    if s in xrange(1,self.MaxImmigrantAge):
                        ITemp = Polynomial_Fit2(self.Migr_Output[i][0],s,self.S)
                        self.Migration_Test[i,s,:]=ITemp

        if self.coeff:
            for i in xrange(self.I):
                print "Mortality Country " + str(i) +" coefficients", self.Mort_Output[i][0]
                print "Fertility Country " + str(i) +" coefficients", self.Fert_Output[i][0]
                print "Immigration Country " + str(i) +" coefficients", self.Migr_Output[i][0]

        #We loop over each country to import its demographic data
        for i in xrange(self.I):
            #If the bool UseDiffDemog is True, we get the unique country index
            #number for importing from the .CSVs
            if self.UseDiffDemog:
                index = self.I_dict[self.I_touse[i]]
            #Otherwise we just only use the data for one specific country
            else:
                index = 0

            if self.S>90:
                #Importing the data and correctly storing it in our demographics matrices
                self.N[i,:90,0] = np.loadtxt(("Data_Files/population.csv"),delimiter=',',\
                        skiprows=1, usecols=[index+1])[self.agestopull]*1000
                # The data only covers 90 cohorts; extrapolate the oldest
                # cohorts by applying the fitted mortality rates
                for j in xrange(90, self.S):
                    self.N[i,j,0]=self.N[i,j-1,0]
                    self.N[i,j,0]*=(1-self.Mortality_Test[i,j,0])
            else:
                self.N[i,:,0] = np.loadtxt(("Data_Files/population.csv"),delimiter=',',\
                        skiprows=1, usecols=[index+1])[self.agestopull]*1000

            self.all_FertilityRates[i,self.FirstFertilityAge:self.LastFertilityAge+1,\
                    :self.frange+self.T_1] = np.transpose(np.loadtxt(str("Data_Files/" +\
                    I_all[index] + "_fertility.csv"),delimiter=',',skiprows=1,\
                    usecols=(self.agestopull[self.FirstFertilityAge:self.LastFertilityAge+1]\
                    -22))[48-self.frange:48+self.T_1,:])

            if self.S<=80:
                self.MortalityRates[i,self.FirstDyingAge:,:self.T_1] = np.transpose(\
                        np.loadtxt(str("Data_Files/" + I_all[index] + "_mortality.csv")\
                        ,delimiter=',',skiprows=1, \
                        usecols=(self.agestopull[self.FirstDyingAge:]-67))[:self.T_1,:])

            self.Migrants[i,:self.MaxImmigrantAge,:self.T_1] = np.einsum("s,t->st",\
                    np.loadtxt(("Data_Files/net_migration.csv"),delimiter=',',skiprows=1,\
                    usecols=[index+1])[self.agestopull[:self.MaxImmigrantAge]]*100,\
                    np.ones(self.T_1))

        #Gets initial population share
        self.Nhat[:,:,0] = self.N[:,:,0]/np.sum(self.N[:,:,0])

        #Increases fertility rates to account for different number of periods lived
        self.all_FertilityRates = self.all_FertilityRates*80/self.S
        self.MortalityRates = self.MortalityRates*80/self.S

        #The last generation dies with probability 1
        self.Mortality_Test[:,-1,:] = 1.0
        self.MortalityRates[:,-1,:] = 1.0

        #Gets steady-state values for all countries by taking the mean
        #at year T_1-1 across countries
        f_bar = np.mean(self.all_FertilityRates[:,:,self.frange+self.T_1-1], axis=0)
        rho_bar = np.mean(self.MortalityRates[:,:,self.T_1-1], axis=0)

        #Set to the steady state for every year beyond year T_1
        self.all_FertilityRates[:,:,self.frange+self.T_1:] = np.tile(np.expand_dims\
                (f_bar, axis=2), (self.I,1,self.T-self.T_1))
        self.MortalityRates[:,:,self.T_1:] = np.tile(np.expand_dims(rho_bar, axis=2),\
                (self.I,1,self.T-self.T_1))

        #FertilityRates is exactly like all_FertilityRates except it begins
        #at time t=0 rather than time t=-self.frange
        self.FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,:] =\
                self.all_FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,\
                self.frange:]

        #Adjusts the rates if the fit goes outside of 0,1
        for i in xrange(self.I):
            for s in xrange(self.S):
                for t in xrange(self.T):
                    if self.Mortality_Test[i,s,t]>1. or np.isnan(self.Mortality_Test[i,s,t]):
                        self.Mortality_Test[i,s,t]=.99
self.Mortality_Test[i,s,t]=.99
def Demographics(self, demog_ss_tol, UseSSDemog=False):
"""
Description:
- This function calculates the population dynamics and steady state
from the imported data by doing the following:
1. For each year from now until year T, uses equations 3.11
and 3.12 to find the net population in a new year.
(Note however that after year T_1 the fertility, mortality,
and immigration rates are set to be the same across countries)
2. Divides the new population by the world population to get the
population share for each country and cohort
3. While doing steps 1-2, finds the immigration rate since the data
only gives us net migration
4. After getting the population dynamics until year T, we continue to
get population shares of future years beyond time T
as explained in steps 1-2 until it converges to a steady state
5. Stores the new steady state and non-steady state variables of
population shares and mortality in the OLG object
Inputs:
- UseSSDemog = Boolean: True uses the steady state demographics
in calculating the transition path.
Mostly used for debugging purposes
- demog_ss_tol = Scalar: The tolerance for the greatest
absolute difference between 2 years'
population shares before it is
considered to be the steady state
Variables Called from Object:
- self.N = Array: [I,S,T], Population of each country
for each age cohort and year
- self.Nhat = Array: [I,S,T], World opulation share of
each country for each age cohort and year
- self.FertilityRates = Array: [I,S,T], Fertility rates from the
present time to year T
- self.Migrants = Array: [I,S,T], Number of immigrants
- self.MortalityRates = Array: [I,S,T], Mortality rates of each
country for each age cohort and year
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of Time Periods
- self.T_1 = Int: Transition year for the demographics
- self.ShowCompGraphs = Boolean: True activates showing the polynomial
fits for mortality, fertility and
immigration
Variables Stored in Object:
- self.ImmigrationRates = Array: [I,S,T], Immigration rates of each
country for each age cohort and year
- self.Kids = Array: [I,S,T], Matrix that stores the per-
household number of kids in each
country and each time period
- self.Kids_ss = Array: [I,S], Steady state per-household
number of kids for each country
at each age
- self.N = Array: [I,S,T], UPDATED population of each
country for each age cohort and year
- self.Nhat = Array: [I,S,T+S], UPDATED world population
share of each country for each
age cohort and year
- self.Nhat_ss = Array: [I,S], Population of each country
for each age cohort in the steady state
- self.Mortality_ss = Array: [I,S], Mortality rates of each country
for each age cohort in the steady state
- self.MortalityRates = Array: [I,S,T+S], UPDATED mortality rates
of each country for each age cohort
and year
- self.Migrant_Test = Array: [I,S,T], Copy of Migration_Test. Contains
the fitted values from the Migration
regression
Other Functions Called:
- None
Objects in Function:
- pop_old = Array: [I,S,T], Population shares in a
given year beyond T that is compared
with pop_new to determine the steady state
- pop_new = Array: [I,S,T], Population shares in a given
year beyond T that is compared with
pop_old to determine the steady state
- kidsvec = Array: [I,f_range], extracts each cohorts
number of kids in each period
- future_year_iter = Int: Counter that keeps track of how many
years beyond T it take for the
population shares to converge to
the steady state
- label1 = String: Label for the plot of the polynomial
fit.
- label2 = String: Label for the original plots.
- filesave = String: Desired filename to save Mortality
graphs.
- filesave2 = String: Desired filename to save Fertility
graphs.
- filesave3 = String: Desired filename to save Migration
graphs.
Outputs:
- None
"""
#Initializes immigration rates
self.ImmigrationRates = np.zeros((self.I,self.S,self.T))
self.Kids=np.zeros((self.I,self.S,self.T))
self.Migrant_Test=np.copy(self.Migration_Test)
#Getting the population and population shares from the present to year T
for t in xrange(1,self.T):
#Gets new babies born this year (Equation 3.11)
self.N[:,0,t] = np.sum((self.N[:,:,t-1]*self.FertilityRates[:,:,t-1]), axis=1)
#Get the immigration RATES for the past year
#If before the transition year T_1, just divide total migrants by population
if t <= self.T_1:
self.ImmigrationRates[:,:,t-1] = self.Migrants[:,:,t-1]/\
self.N[:,:,t-1]*80/self.S
#FIX NUMBER 1
self.Migration_Test[:,:,t-1] = self.Migrant_Test[:,:,t-1]/\
self.N[:,:,t-1]*100/self.S
#If beyond the transition year T_1, average the immigration rates
#in year T_1 itself
else:
self.ImmigrationRates[:,:,t-1] = np.mean(\
self.ImmigrationRates[:,:,self.T_1-1],axis=0)*80/self.S
#FIX NUMBER 2
self.Migration_Test[:,:,t-1] = np.mean(self.Migrant_Test[:,:,self.T_1-1])\
*100/self.S
#Gets the non-newborn population for the next year (Equation 3.12)
self.N[:,1:,t] = self.N[:,:-1,t-1]*(1+self.ImmigrationRates[:,:-1,t-1]-\
self.MortalityRates[:,:-1,t-1])
#Gets the population share by taking a fraction of the total
#world population this year
self.Nhat[:,:,t] = self.N[:,:,t]/np.sum(self.N[:,:,t])
#Gets the number of kids each agent has in this period
for s in xrange(self.FirstFertilityAge,self.LastFertilityAge+self.LeaveHouseAge):
kidsvec = np.diagonal(self.all_FertilityRates[:,s-self.LeaveHouseAge+1:s+1,\
t:t+self.LeaveHouseAge],axis1=1, axis2=2)
self.Kids[:,s,t-1] = np.sum(kidsvec,axis=1)
#Gets Immigration rates for the final year
self.ImmigrationRates[:,:,-1] = np.mean(self.ImmigrationRates[:,:,self.T_1-1],\
axis=0)*80/self.S
#FIX NUMBER 3
self.Migration_Test[:,:,-1] = np.mean(self.Migrant_Test[:,:,self.T_1-1])*100/self.S
#Gets Kids for the final year (just the steady state)
self.Kids[:,:,-1] = self.Kids[:,:,-2]
#Initialize iterating variables to find the steady state population shares
pop_old = self.N[:,:,-1]
pop_new = self.N[:,:,-1]
future_year_iter = 0
if self.UseCalcDemog:
self.MortalityRates=self.Mortality_Test
self.FertilityRates=self.Fertility_Test
self.ImmigrationRates=self.Migration_Test
#Calculates new years of population shares until the greatest absolute
#difference between 2 consecutive years is less than demog_ss_tol
while np.max(np.abs(self.Nhat[:,:,-1] - self.Nhat[:,:,-2])) > demog_ss_tol:
pop_new[:,0] = np.sum((pop_old[:,:]*self.FertilityRates[:,:,-1]),axis=1)
pop_new[:,1:] = pop_old[:,:-1]*(1+self.ImmigrationRates[:,:-1,-1]-\
self.MortalityRates[:,:-1,-1])
self.Nhat = np.dstack((self.Nhat,pop_new/np.sum(pop_new)))
future_year_iter += 1
#Stores the steady state year in a seperate matrix
self.Nhat_ss = self.Nhat[:,:,-1]
self.Mortality_ss=self.MortalityRates[:,:,-1]
self.Kids_ss = self.Kids[:,:,-1]
#Deletes all the years between t=T and the steady state calculated in the while loop
self.Nhat = self.Nhat[:,:,:self.T]
#Imposing the ss for years after self.T
self.Nhat = np.dstack(( self.Nhat[:,:,:self.T], np.einsum("is,t->ist",\
self.Nhat_ss,np.ones(self.S)) ))
#Imposing the ss for years after self.T
self.MortalityRates = np.dstack(( self.MortalityRates[:,:,:self.T],\
np.einsum("is,t->ist",self.Mortality_ss, np.ones(self.S)) ))
#Imposing the ss for years after self.T
self.Kids = np.dstack(( self.Kids[:,:,:self.T], np.einsum("is,t->ist",\
self.Kids_ss, np.ones(self.S)) ))
#Overwrites all the years in the transition path with the steady state if
#UseSSDemog == True
if UseSSDemog == True:
self.Nhat = np.einsum("is,t->ist",self.Nhat_ss,np.ones(self.T+self.S))
self.MortalityRates = np.einsum("is,t->ist",self.Mortality_ss,\
np.ones(self.T+self.S))
self.Kids = np.einsum("is,t->ist",self.Kids_ss,np.ones(self.T+self.S))
if self.ShowCompGraphs:
plt.title("Regression Fit Mortality Year 0 "+str(self.S)+" Gens")
for i in xrange(self.I):
label1 = "Country "+str(i)+" New Fit"
label2 = "Country "+str(i)+" Old Style"
plt.plot(xrange(self.FirstDyingAge,self.S),\
self.Mortality_Test[i,self.FirstDyingAge:self.S,0],label=label1)
# plt.plot(xrange(self.FirstDyingAge,self.S),\
# self.MortalityRates[i,self.FirstDyingAge:self.S,0],label=label2)
plt.legend(loc='upper left')
plt.title("Mortality Rate fit"+str(self.S)+" Gens")
plt.xlabel('Ages')
plt.ylabel('Mortality Rates')
filesave = "MortalityGraphs/Mortality_"+str(self.S)+"gens.png"
savefig(filesave)
plt.show()
plt.title("Regression Fit Fertility Year 0 "+str(self.S)+" Gens")
for i in xrange(self.I):
label1 = "Country "+str(i)+ " New Fit"
label2 = "Country "+str(i)+" Old Style"
plt.plot(xrange(self.FirstFertilityAge,self.LastFertilityAge),\
self.Fertility_Test[i,self.FirstFertilityAge:\
self.LastFertilityAge,0],label=label1)
#plt.plot(xrange(self.FirstFertilityAge,self.LastFertilityAge),\
#self.FertilityRates[i,self.FirstFertilityAge:\
#self.LastFertilityAge,0],label=label2)
plt.legend()
plt.title("FertilityGraphs/Fertility Rate fit"+str(self.S)+" Gens")
plt.xlabel('Ages')
plt.ylabel('Fertility Rates')
filesave2 = "FertilityGraphs/Fertility_"+str(self.S)+"gens.png"
savefig(filesave2)
plt.show()
plt.title("Regression Fit Immigration Year 0 "+str(self.S)+" Gens")
for i in xrange(self.I):
label1 = "Country "+str(i)+ " New Fit"
label2 = "Country "+str(i)+" Old Style"
plt.plot(xrange(self.MaxImmigrantAge),\
self.Migration_Test[i,:self.MaxImmigrantAge,0],label=label1)
#plt.plot(xrange(self.MaxImmigrantAge),\
#self.ImmigrationRates[i,:self.MaxImmigrantAge,0],label=label2)
plt.legend()
plt.title("ImmigrationGraphs/Immgration fit"+str(self.S)+" Gens")
plt.xlabel('Ages')
plt.ylabel('Immigration')
filesave3 = "ImmigrationGraphs/Immigration_"+str(self.S)+"gens.png"
savefig(filesave3)
plt.show()
def plotDemographics(self, T_touse="default", compare_across="T", data_year=0):
"""
Description: This calls the plotDemographics function from the
AuxiliaryDemographics.py file. See it for details
"""
ages = self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge, \
self.FirstDyingAge, self.MaxImmigrantAge
datasets = self.FertilityRates, self.MortalityRates, self.ImmigrationRates,\
self.Nhat, self.Kids
#Calls the Auxiliary Demographics file for this function
demog.plotDemographics(ages, datasets, self.I, self.S, self.T, self.I_touse,\
T_touse, compare_across, data_year)
def immigrationplot(self):
subplotdim_dict = {2:221, 3:221, 4:221, 5:231, 6:231, 7:241}
colors = ["blue","green","red","cyan","purple","yellow","brown"]
fig = plt.figure()
fig.suptitle("Immigration Rates")
for i in range(self.I):
ax = fig.add_subplot(subplotdim_dict[self.I]+i, projection='3d')
S, T = np.meshgrid(range(self.S), range(self.T))
ax.plot_surface(S, T, np.transpose(self.ImmigrationRates[i,:,:self.T]),\
color=colors[i])
ax.set_zlim3d(np.min(self.ImmigrationRates[i,:,:self.T]), \
np.max(self.ImmigrationRates[:,:,:self.T])*1.05)
ax.set_title(self.I_touse[i])
ax.set_xlabel('S')
ax.set_ylabel('T')
plt.show()
#STEADY STATE
#Getter functions
def get_w(self,y,n):
"""
Description:
-Calculates the wage rates based on equation (3.15)
Inputs:
- y = Array: [I,S,T+S] or [I,S], Domestic owned capital path
for either the transition path or steady-state.
- n = Array: [I,S,T+S] or [I,S], Aggregate labor productivity
for either the transition path or the steady steady-state
Variables Called from Object:
- self.alpha = Scalar: Capital share of production
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- w = Array: [I,S,T+S] or [I,S], wages for either the transition
path or the steady steady-state
"""
w = (1-self.alpha)*(y/n)
return w
def get_r(self, y, k):
"""
Description:
-Calculates the interest rates based on equation.
Inputs:
- y = Array: [I,S,T+S] or [I,S], Domestic owned capital path
for either the transition path or steady-state.
- k = Array: [I,S,T+S] or [I,S], Aggregate labor productivity
for either the transition path or the steady steady-state
Variables Called from Object:
- self.alpha = Scalar: Capital share of production
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- r = Array: [I,S,T+S] or [I,S], interest rates for either the
transition path or the steady steady-state
"""
r = self.alpha*(y/k)
return r
def get_Y(self, k, n):
"""
Description:
-Calculates the aggregate output based on equation (3.15)
Inputs:
- k = Array: [I,S,T+S] or [I,S], Domestic owned capital path
for either the transition path or steady-state.
- n = Array: [I,S,T+S] or [I,S], Aggregate labor productivity
for either the transition path or the steady steady-state
Variables Called from Object:
- self.A = Array: [I], Technology level for each country
- self.alpha = Scalar: Capital share of production
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- Y = Array: [I,S,T+S] or [I,S], Total output from
firms for either the transition path or the
steady steady-state
"""
if k.ndim ==1:
Y = (k**self.alpha) * ((self.A*n)**(1-self.alpha))
elif k.ndim== 2:
Y = (k**self.alpha) * (np.einsum("i,is->is",self.A,n)**(1-self.alpha))
return Y
    #Steady State Calculations
    def get_lifetime_decisionsSS(self, c_1, w_ss, r_ss, bq_ss):
        """
        Solves the household's lifetime problem in the steady state, given the
        consumption of the youngest cohort in each country.

        Starting from c_1, consumption of later cohorts follows the chained
        Euler equation (4.24), kids' consumption is a fixed proportion of
        adult consumption (4.22), leisure comes from the intratemporal
        first-order condition (4.21), and assets accumulate via the budget
        constraint (4.23).

        Inputs:
            - c_1   = Array: [I], consumption of the youngest cohort
            - w_ss  = Array: [I], steady-state wage rate
            - r_ss  = Steady-state interest rate(s); broadcast against the
                      country dimension in the expressions below
            - bq_ss = Array: [I,S], steady-state bequests received by cohort

        Variables called from object:
            - self.e_ss, self.Mortality_ss, self.Kids_ss, self.lbar_ss
            - self.I, self.S
            - self.beta, self.chik, self.chil, self.delta, self.g_A,
              self.mu, self.sigma

        Outputs:
            - cvec_ss  = Array: [I,S], steady-state consumption
            - cKvec_ss = Array: [I,S], steady-state kids' consumption
            - lhat_ss  = Array: [I,S], steady-state leisure
            - avec_ss  = Array: [I,S+1], steady-state assets; the final column
                         should be ~0 when the household problem is solved
        """
        cKvec_ss = np.zeros((self.I,self.S))
        cvec_ss = np.zeros((self.I,self.S))
        lhat_ss = np.zeros((self.I,self.S))
        avec_ss = np.zeros((self.I,self.S+1))

        # we[i,s] = wage in country i times the age-s labor productivity
        we = np.einsum("i,is->is",w_ss,self.e_ss)

        cvec_ss[:,0] = c_1
        cKvec_ss[:,0] = c_1*self.chik**(1/self.sigma)
        lhat_ss[:,0] = (1.0-np.abs(1.0+((cvec_ss[:,0]**(-self.sigma)*we[:,0])\
                /self.chil)**(self.mu/(1-self.mu)))**(-1/self.mu))*self.lbar_ss

        for s in xrange(self.S-1):
            #Equation 4.24
            cvec_ss[:,s+1] = ((self.beta*(1-self.Mortality_ss[:,s])*\
                    (1+r_ss-self.delta))**(1/self.sigma))*cvec_ss[:,s]\
                    *np.exp(-self.g_A)
            #Equation 4.22
            cKvec_ss[:,s+1] = cvec_ss[:,s+1]*self.chik**(1/self.sigma)
            #Equation 4.21
            lhat_ss[:,s+1] = self.lbar_ss*(1.0-np.abs(1.0+((cvec_ss[:,s+1]**(-self.sigma)\
                    *we[:,s+1])/self.chil)**(self.mu/(1-self.mu)))**(-1/self.mu))
            #Equation 4.23
            avec_ss[:,s+1] = np.exp(-self.g_A)*(we[:,s]*(self.lbar_ss-lhat_ss[:,s])+\
                    (1+r_ss-self.delta)*avec_ss[:,s]+bq_ss[:,s]-cvec_ss[:,s]-\
                    self.Kids_ss[:,s]*cKvec_ss[:,s])

        #Equation 4.23 for final assets
        # NOTE: this deliberately reuses the loop variable s (== S-2 after the
        # loop), so s+1 is the last cohort and s+2 the final asset column.
        # Implicitly assumes self.S >= 2; with S == 1, s would be undefined.
        avec_ss[:,s+2] = np.exp(-self.g_A)*(we[:,s+1]*(self.lbar_ss-lhat_ss[:,s+1])+\
                (1+r_ss-self.delta)*avec_ss[:,s+1]+bq_ss[:,s+1]-cvec_ss[:,s+1]-\
                self.Kids_ss[:,s+1]*cKvec_ss[:,s+1])

        return cvec_ss, cKvec_ss, lhat_ss, avec_ss
    def GetSSComponents(self, k_guess,kf_guess,n_guess, bq_ss, PrintSSEulErrors=False):
        """
        Solves for all remaining steady-state variables given guesses for
        capital, foreign capital, labor supply, and the bequest distribution.

        Inputs:
            - k_guess          = Array: [I], guess for total capital in each country
            - kf_guess         = Array: [I-1], guess for foreign capital holdings.
                                 NOTE: not used in this function's body — prices
                                 are derived from k_guess and n_guess alone; it is
                                 accepted to mirror EulerSystemSS's unpacking.
            - n_guess          = Array: [I], guess for aggregate labor supply
            - bq_ss            = Array: [I,S], distribution of bequests across cohorts
            - PrintSSEulErrors = Boolean: True prints whether each household
                                 steady-state condition is satisfied

        Other functions called:
            - householdEuler_SS (nested): root function for the youngest
              cohort's consumption, driven by opt.fsolve
            - checkSSEulers (nested): residuals of the household conditions
            - self.get_Y, self.get_w, self.get_r, self.get_lifetime_decisionsSS

        Outputs:
            - w_ss, cvec_ss, cKvec_ss, avec_ss, r_ss, y_ss, lhat_ss
        """

        def householdEuler_SS(c_1, w_ss, r_ss, bq_ss):
            """
            Root function for opt.fsolve: given the youngest cohort's
            consumption c_1, returns each country's final-period asset
            holdings, which must equal 0 for the household problem to solve.
            A large penalty (9999) is returned if the guess implies negative
            consumption anywhere, steering fsolve away from that region.
            """
            cpath, cK_path, lpath, assets_path = self.get_lifetime_decisionsSS\
                    (c_1, w_ss, r_ss, bq_ss)

            Euler = assets_path[:,-1]

            if np.any(cpath<0):
                print "WARNING! The fsolve for initial optimal consumption guessed\
                        a negative number"
                Euler = np.ones(Euler.shape[0])*9999.

            return Euler

        def checkSSEulers(cvec_ss, cKvec_ss, lhat_ss, avec_ss, w_ss, r_ss, bq_ss):
            """
            Computes the residuals of the household steady-state conditions:
            zero final-period assets, the chained consumption Euler equation,
            the budget constraint, the fixed adult/kids consumption ratio,
            and the leisure first-order condition. All residuals should be ~0
            when the steady state has solved correctly.
            """
            we = np.einsum("i,is->is",w_ss,self.e_ss)
            # interest rate broadcast across cohorts: S-1 columns for the
            # Euler equation, S columns for the budget constraint
            r_ss2 = np.einsum("i,s->is",r_ss,np.ones(self.S-1))
            r_ss3 = np.einsum("i,s->is",r_ss,np.ones(self.S))

            Household_Euler = avec_ss[:,-1]
            Chained_C_Condition = cvec_ss[:,:-1]**(-self.sigma) - self.beta*\
                    (1-self.Mortality_ss[:,:-1])*(cvec_ss[:,1:]*np.exp(self.g_A))**\
                    (-self.sigma) * (1+r_ss2-self.delta)
            Leisure_Condition = lhat_ss - self.lbar_ss*(1-np.abs(1+((cvec_ss**(-self.sigma)\
                    *we)/self.chil)**(self.mu/(1-self.mu)))**(-1/self.mu))
            Modified_Budget_Constraint = cvec_ss-(we*(self.lbar_ss-lhat_ss) +\
                    (1+r_ss3-self.delta)*avec_ss[:,:-1] + bq_ss - avec_ss[:,1:]*\
                    np.exp(self.g_A)-self.Kids_ss*cKvec_ss)
            Consumption_Ratio = cKvec_ss - cvec_ss*self.chik**(1/self.sigma)

            return Household_Euler, Chained_C_Condition, Modified_Budget_Constraint,\
                    Consumption_Ratio, Leisure_Condition

        #Prices implied by the current guesses for capital and labor
        y_ss = self.get_Y(k_guess,n_guess)
        w_ss = self.get_w(y_ss,n_guess)
        r_ss = self.get_r(y_ss,k_guess)

        #Initial guess for the first cohort's consumption.
        #(Stored on the object by SteadyState before the outer fsolve runs.)
        c1_guess = self.innerguess

        #Finds the optimal consumption for the first cohort
        opt_c1 = opt.fsolve(householdEuler_SS,c1_guess,args =(w_ss, r_ss, bq_ss))

        #Gets the optimal paths for consumption, kids consumption and assets
        #as a function of the first cohort's consumption
        cvec_ss, cKvec_ss, lhat_ss, avec_ss = self.get_lifetime_decisionsSS\
                (opt_c1, w_ss, r_ss, bq_ss)

        if PrintSSEulErrors:
            Household_Euler,Chained_C_Condition,Modified_Budget_Constraint,\
                    Consumption_Ratio, Leisure_Condition= \
                    checkSSEulers(cvec_ss, cKvec_ss, lhat_ss, avec_ss, w_ss,r_ss, bq_ss)
            print "\nZero final period assets satisfied:", \
                    np.isclose(np.max(np.absolute(Household_Euler)), 0)
            print "Chained C satisfied:", \
                    np.isclose(np.max(np.absolute(Chained_C_Condition)), 0)
            print "Budget Constraint satisfied:",\
                    np.isclose(np.max(np.absolute(Modified_Budget_Constraint)), 0)
            print "Consumption Ratio satisfied",\
                    np.isclose(np.max(np.absolute(Consumption_Ratio)), 0)
            print "Leisure Condition satisfied",\
                    np.isclose(np.max(np.absolute(Leisure_Condition)),0)
            #print Chained_C_Condition[0,:]
            #print Modified_Budget_Constraint[0,:]

        #Snips off the final entry of assets since it is just 0 if
        #the equations solved correctly
        avec_ss = avec_ss[:,:-1]

        return w_ss, cvec_ss, cKvec_ss, avec_ss, r_ss, y_ss, lhat_ss
    def EulerSystemSS(self, guess, PrintSSEulErrors=False):
        """
        System of Euler equations that must all equal zero for the steady
        state to solve; passed to opt.fsolve by SteadyState.

        The guess vector stacks, in order: total capital k (I entries),
        foreign capital kf for countries 1..I-1 (I-1 entries; country 0's
        foreign capital is implied by world market clearing), labor supply n
        (I entries), and per-recipient bequests bq (I entries).

        Inputs:
            - guess            = Array: [4*I-1], stacked guesses described above
            - PrintSSEulErrors = Boolean: True prints the Euler errors at each
                                 iteration of the fsolve

        Side effects:
            - Stores the slice boundaries self.B = 2*I-1 and self.C = 3*I-1 on
              the object; SteadyState reuses them to unpack the fsolve output.

        Other functions called:
            - self.GetSSComponents: solves all other steady-state variables in
              terms of the current guesses

        Outputs:
            - Euler_all = Array: [4*I-1], stacked residuals of the capital-
              market (4.15), interest-rate-equalization (4.25), labor (4.16)
              and bequest (4.20) conditions. Must be ~0 at the solution.
        """
        # Slice boundaries for unpacking the stacked guess vector
        self.B = 2*self.I-1
        self.C = 3*self.I-1
        k_guess = guess[:self.I]
        kf_guess = guess[self.I:self.B]
        n_guess = guess[self.B:self.C]
        bqindiv_ss = guess[self.C:]

        # Country 0's foreign capital offsets all others so world foreign
        # holdings sum to zero
        kf_full = np.zeros(self.I)
        kf_full[0] = -np.sum(kf_guess)
        kf_full[1:] = kf_guess

        #Initializes a vector of bequests received for each individual.
        #Will be = 0 for a block of young and a block of old cohorts
        bq_ss = np.zeros((self.I,self.S))
        bq_ss[:,self.FirstFertilityAge:self.FirstDyingAge]=\
                np.einsum("i,s->is", bqindiv_ss, \
                np.ones(self.FirstDyingAge-self.FirstFertilityAge))

        #Calls self.GetSSComponents, which solves for all the other ss variables
        #in terms of the current guesses
        w_ss, cvec_ss, cKvec_ss, avec_ss, r_ss, y_ss, lhat_ss = \
                self.GetSSComponents(k_guess, kf_guess,n_guess,bq_ss, PrintSSEulErrors)

        #Sum of all assets holdings of dead agents to be distributed
        #evenly among all eligible agents
        alldeadagent_assets = np.sum(avec_ss[:,self.FirstDyingAge:]*\
                self.Mortality_ss[:,self.FirstDyingAge:]*self.Nhat_ss[:,self.FirstDyingAge:]\
                , axis=1)

        # Population mass of the bequest-eligible cohorts (the denominator for
        # the per-recipient bequest)
        total_bq = np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],axis=1)

        #Equation 4.20
        Euler_bq = bqindiv_ss - alldeadagent_assets/total_bq
        #Equation 4.15
        Euler_kd = k_guess-kf_full-np.sum(avec_ss*self.Nhat_ss,axis=1)
        #Equation 4.16
        Euler_n = n_guess - np.sum(self.e_ss*(self.lbar_ss-lhat_ss)*self.Nhat_ss,axis=1)
        #Equation 4.25: all countries' interest rates must equal country 0's
        Euler_kf = r_ss[1:] - r_ss[0]*np.ones(self.I-1)

        Euler_all = np.concatenate((Euler_kd,Euler_kf,Euler_n,Euler_bq))

        if PrintSSEulErrors: print "Euler Errors:", Euler_all

        return Euler_all
    def SteadyState(self, k_ss_guess,kf_ss_guess,n_ss_guess, bqss_guess,\
            c_guess, PrintSSEulErrors=False):
        """
        Finds the steady state of the OLG model:
            1. Searches over stacked guesses of capital, foreign capital,
               labor, and bequests until EulerSystemSS's residuals are zero.
            2. Unpacks the solution and recomputes all other steady-state
               variables via GetSSComponents, storing them on the object.
            3. Unless self.CheckerMode is set, re-evaluates and prints the
               Euler residuals as a sanity check.

        Inputs:
            - k_ss_guess       = Array: [I], initial guess for total capital
            - kf_ss_guess      = Array: [I-1], initial guess for foreign capital
                                 of countries 1..I-1
            - n_ss_guess       = Array: [I], initial guess for labor supply
            - bqss_guess       = Array: [I], initial guess for per-recipient bequests
            - c_guess          = Array: [I], initial guess for the youngest
                                 cohort's consumption (used inside GetSSComponents)
            - PrintSSEulErrors = Boolean: True prints the Euler errors in each
                                 iteration of the fsolve

        Variables stored in object:
            - self.k_ss, self.kf_ss, self.n_ss, self.bqindiv_ss, self.bqvec_ss,
              self.w_ss, self.cvec_ss, self.cKvec_ss, self.avec_ss, self.r_ss,
              self.y_ss, self.lhat_ss, self.innerguess, self.ss_iter

        Outputs:
            - None
        """
        self.ss_iter = 0

        #Prepares the initial guess for the fsolve
        self.innerguess = c_guess
        guess = np.concatenate((k_ss_guess,kf_ss_guess,n_ss_guess,bqss_guess))

        #Searches over the stacked guesses to find values that satisfy the Euler equations
        ss = opt.fsolve(self.EulerSystemSS, guess, args=PrintSSEulErrors)

        #Breaking up the output into its 4 components.
        #NOTE: self.B and self.C are set as a side effect of EulerSystemSS
        #during the fsolve above; this unpacking relies on that.
        self.k_ss = ss[:self.I]
        kf_temp_ss = ss[self.I:self.B]
        self.n_ss = ss[self.B:self.C]
        self.bqindiv_ss = ss[self.C:]

        # Country 0's foreign capital offsets all others (world market clearing)
        self.kf_ss = np.zeros((self.I))
        self.kf_ss[0] = -np.sum(kf_temp_ss)
        self.kf_ss[1:] = kf_temp_ss

        #Initializes a vector for bequests distribution. Will be = 0 for a block of
        #young and a block of old cohorts who don't get bequests
        self.bqvec_ss = np.zeros((self.I,self.S))
        self.bqvec_ss[:,self.FirstFertilityAge:self.FirstDyingAge]=\
                np.einsum("i,s->is",self.bqindiv_ss,\
                np.ones(self.FirstDyingAge-self.FirstFertilityAge))

        #Calls self.GetSSComponents, which solves for all the other ss variables
        #in terms of the solved capital, labor and bequests
        self.w_ss,self.cvec_ss,self.cKvec_ss,self.avec_ss,self.r_ss,self.y_ss,self.lhat_ss =\
                self.GetSSComponents(self.k_ss,self.kf_ss,self.n_ss,self.bqvec_ss)

        #Sum of all assets holdings of dead agents to be distributed evenly
        #among all eligible agents
        alldeadagent_assets = np.sum(self.avec_ss[:,self.FirstDyingAge:]*\
                self.Mortality_ss[:,self.FirstDyingAge:]*self.Nhat_ss[:,self.FirstDyingAge:],\
                axis=1)

        print "\n\nSTEADY STATE FOUND!"

        #Checks to see if the Euler equations are sufficiently close to 0
        if self.CheckerMode==False:
            #Equation 3.29
            Euler_bq = self.bqindiv_ss - alldeadagent_assets/\
                    np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],axis=1)
            Euler_kd = self.k_ss - self.kf_ss - np.sum(self.avec_ss*self.Nhat_ss,axis=1)
            Euler_n = self.n_ss - \
                    np.sum(self.e_ss*(self.lbar_ss-self.lhat_ss)*self.Nhat_ss,axis=1)

            #Equation 3.24
            Euler_kf = self.r_ss[1:]-self.r_ss[0]*np.ones(self.I-1)
            #NOTE(review): unlike the bq check, the kd/n/r checks below use
            #np.max without np.absolute, so a large NEGATIVE residual would
            #still report True — consider np.max(np.absolute(...)) for all.
            print "-Euler for bq satisfied:", np.isclose(np.max(np.absolute(Euler_bq)), 0)
            print "-Euler bq", Euler_bq
            print "-Euler for kd satisfied:", np.isclose(np.max(Euler_kd),0)
            print "-Euler kd", Euler_kd
            print "-Euler for n satisfied:", np.isclose(np.max(Euler_n),0)
            print "-Euler n", Euler_n
            print "-Euler for r satisfied:", np.isclose(np.max(Euler_kf), 0)
            print "-Euler r", Euler_kf, "\n\n"
def PrintSSResults(self):
"""
Description:
-Prints the final result of steady state calculations
Inputs:
- None
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.cK_vec_ss = Array: [I,S], Steady state kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kf_ss = Array: [I], Steady state foreign capital
in each country
- self.kd_ss = Array: [I], Steady state total capital
holdings for each country
- self.n_ss = Array: [I], Steady state aggregate productivity
in each country
- self.w_ss = Array: [I], Steady state wage rate
- self.y_ss = Array: [I], Steady state output in each country
- self.r_ss = Scalar: Steady state intrest rate
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
print "assets steady state:", self.avec_ss
print "kf steady state", self.kf_ss
print "k steady state", self.k_ss
print "bq steady state", self.bqindiv_ss
print "n steady state", self.n_ss
print "y steady state", self.y_ss
print "r steady state", self.r_ss
print "w steady state", self.w_ss
print "c_vec steady state", self.cvec_ss
print "cK_vec steady state", self.cKvec_ss
def plotSSResults(self):
"""
Description:
- Plots the final calculations of the Steady State
Inputs:
- None
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.bqvec_ss = Array: [I,S], Distribution of bequests in
the steady state
- self.cKvec_ss = Array: [I,S], Steady state kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
plt.title("Steady state")
plt.subplot(231)
for i in range(self.I):
plt.plot(range(self.S),self.cvec_ss[i,:])
plt.title("Consumption")
#plt.legend(self.I_touse[:self.I])
plt.subplot(232)
for i in range(self.I):
plt.plot(range(self.S),self.cKvec_ss[i,:])
plt.title("Kids' Consumption")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(233)
for i in range(self.I):
plt.plot(range(self.S),self.avec_ss[i,:])
plt.title("Assets")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(234)
for i in range(self.I):
plt.plot(range(self.S),self.lhat_ss[i,:])
plt.title("Leisure")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(235)
for i in range(self.I):
plt.plot(range(self.S),self.bqvec_ss[i,:])
plt.title("Bequests")
#plt.legend(self.I_touse[:self.I])
plt.show()
#TIMEPATH-ITERATION
def set_initial_values(self, r_init, bq_init, a_init):
"""
Description:
- Saves the initial guesses of r, bq and a given by the user into the object
Inputs:
- a_init = Array: [I,S], Initial asset distribution given by User
- bq_init = Array: [I], Initial bequests given by User
- r_init = Scalar: Initial interest rate given by User
Variables Called from Object:
- None
Variables Stored in Object:
- self.a_init = Array: [I,S], Initial asset distribution given by Users
- self.bq_init = Array: [I], Initial bequests given by User
- self.r_init = Scalar: Initial interest rate given by User
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
self.r_init = r_init
self.bq_init = bq_init
self.a_init = a_init
def get_initialguesses(self):
"""
Description:
- Generates an initial guess path used for beginning TPI calculation.
The guess for the transition path for r follows the form of a quadratic
function given by y = aa x^2 + bb x + cc, while the guess for the
bequests transition path is linear
Inputs:
- None
Variables Called from Object:
- self.bq_init = Array: [I], Initial bequests given by User
- self.I = Int: Number of Countries
- self.T = Int: Number of Time Periods
- self.r_init = Scalar: Initial interest rate given by User
- self.r_ss = Scalar: Steady state interest rate
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- aa = Scalar: coefficient for x^2 term
- bb = Scalar: coefficient for x term
- cc = Scalar: coefficient for constant term
Outputs:
- bqpath_guess = Array: [I,T], Initial path of bequests in quadratic form
- rpath_guess = Array: [T], Initial path of interest rates in quadratic form
"""
rpath_guess = np.zeros(self.T)
bqpath_guess = np.zeros((self.I,self.T))
func = lambda t, a, b: a/t + b
t = np.linspace(1,self.T, self.T-1)
x = np.array([0.0001,self.T])
y = np.array([self.r_init, self.r_ss])
popt, pcov = opt.curve_fit(func,x,y)
rtest = np.hstack(( self.r_init, func(t,popt[0],popt[1]) ))
plt.plot(range(self.T), rtest)
#plt.show()
cc = self.r_init
bb = -2 * (self.r_init-self.r_ss)/(self.T-1)
aa = -bb / (2*(self.T-1))
rpath_guess[:self.T] = aa * np.arange(0,self.T)**2 + bb*np.arange(0,self.T) + cc
#rpath_guess = rtest
for i in range(self.I):
bqpath_guess[i,:self.T] = np.linspace(self.bq_init[i], self.bqindiv_ss[i], self.T)
return rpath_guess, bqpath_guess
    def GetTPIComponents(self, bqvec_path, r_path, Print_HH_Eulers, Print_caTimepaths):
        """
        Description:
            - Gets the transition paths for all the other variables in the model
              as a function of bqvec_path and r_path
        Inputs:
            - bqvec_path         = Array: [I,S,T+S], Transition path for
                                   distribution of bequests for each country
            - r_path             = Array: [T], Transition path for the interest rate
            - Print_caTimepaths  = Boolean: True prints out the timepaths of
                                   consumption and assets. For debugging purposes
            - Print_HH_Eulers    = Boolean: True prints out if all of the household
                                   equations were satisfied or not
        Variables Called from Object:
            - None
        Variables Stored in Object:
            - None
        Other Functions Called:
            - get_c_cK_a_matrices = Gets consumption, kids consumption and assets
                                    decisions as a function of r, w, and bq
            - get_lhat = Gets leisure as a function of c, w, and e
            - get_n = Gets aggregate labor supply
            - get_Gamma = Application of Equation 4.22
            - get_Y = Gets output
            - NOTE: This function also contains the functions
              get_lifetime_decisions_Future, get_lifetime_decisions_Alive,
              HHEulerSystem, and check_household_conditions, all of which are
              called in get_c_a_matrices
        Objects in Function:
            - Gamma = Array: [I,S,T+S], Transition path of shorthand
                      calculation variable Gamma (Equation 4.22)
        Outputs:
            - a_matrix  = Array: [I,S,T+S], Transition path for assets
                          holdings in each country
            - c_matrix  = Array: [I,S,T+S], Transition path for
                          consumption in each country
            - cK_matrix = Array: [I,S,T+S], Transition path for kids
                          consumption in each country
            - kd_path   = Array: [I,T], Transition path for total
                          domestically-owned capital in each country
            - kf_path   = Array: [I,T], Transition path for foreign
                          capital in each country
            - lhat_path = Array: [I,S,T+S], Transition path for leisure
                          for each cohort and country
            - n_path    = Array: [I,T], Transition path for total labor
                          supply in each country
            - w_path    = Array: [I,T], Transition path for the wage
                          rate in each country
            - y_path    = Array: [I,T], Transition path for output in
                          each country
        """
        #Functions that solve lower-diagonal household decisions in vectors
        def get_lifetime_decisions_Future(cK0, c_uppermat, cK_uppermat, a_uppermat, w_path,\
                                          r_path, Gamma, bqvec_path):
            """
            Description:
                - Gets household decisions for consumption and assets for each agent
                  to be born in the future
            Inputs:
                - a_uppermat  = Array: [I,S+1,T+S], Like c_uppermat, but for assets.
                                Contains S+1 dimensions so we can consider any
                                leftover assets each agent has at the end of its
                                lifetime.
                - bqvec_path  = Array: [I,S,T+S], Transition path for distribution
                                of bequests for each country
                - cK0         = Array: [I*T], Initial consumption in each agent's
                                lifetime
                - cK_uppermat = Array: [I,S,T+S], Kids consumption matrix that
                                already contains the kids consumption decisions for
                                agents currently alive and is 0 for all agents born
                                in the future
                - c_uppermat  = Array: [I,S,T+S], Consumption matrix that already
                                contains the consumption decisions for agents
                                currently alive and is all 0s for agents born in
                                the future. This function fills in the rest of this
                                matrix.
                - Gamma       = Array: [I,S,T+S], Transition path of shorthand
                                calculation variable Psi (Equation 3.21)
                - r_path      = Array: [T], Transition path for the interest rate
                - w_path      = Array: [I,T], Transition path for the wage rate in
                                each country
            Variables Called from Object:
                - self.e              = Array: [I,S,T+S], Labor Productivities
                - self.MortalityRates = Array: [I,S,T+S], Mortality rates of each
                                        country for each age cohort and year
                - self.I              = Int: Number of Countries
                - self.S              = Int: Number of Cohorts
                - self.T              = Int: Number of time periods
                - self.beta           = Scalar: Calculated overall future discount rate
                - self.chi            = Scalar: Leisure preference parameter
                - self.delta          = Scalar: Calculated overall depreciation rate
                - self.g_A            = Scalar: Growth rate of technology
                - self.rho            = Scalar: The intratemporal elasticity of
                                        substitution between consumption and leisure
                - self.sigma          = Scalar: Rate of Time Preference
            Variables Stored in Object:
                - None
            Other Functions Called:
                - cy_fillca = External cython module that's equivalent to the for
                              loop called in this function. It's marginally faster
                              compared to the loop that's in this code. This part
                              will likely be replaced in the future. See
                              pure_cython.pyx for more details
            Objects in Function:
                - we = Array: [I,S,T+S] Matrix product of w and e
            Outputs:
                - a_matrix  = Array: [I,S+1,T+S], Filled in a_uppermat now with
                              assets for cohorts to be born in the future
                - cK_matrix = Array: [I,S,T+S], Filled in cK_uppermat now with kids
                              consumption for cohorts to be born in the future
                - c_matrix  = Array: [I,S,T+S], Filled in c_uppermat now with
                              consumption for cohorts to be born in the future
            """
            #Initializes consumption and assets with all of the upper triangle
            #already filled in
            c_matrix = c_uppermat
            cK_matrix = cK_uppermat
            a_matrix = a_uppermat
            cK_matrix[:,0,:self.T] = cK0.reshape(self.I,self.T)
            c_matrix[:,0,:self.T] = cK_matrix[:,0,:self.T]/Gamma[:,0,:self.T]
            #Gets we ahead of time for easier calculation
            we = np.einsum("it,ist->ist",w_path,self.e)
            # Fast path: the cython routine fills the same diagonals in place.
            if self.ShaveTime:
                cy_fillca(c_matrix,cK_matrix,a_matrix,r_path,self.MortalityRates,bqvec_path,\
                          we,Gamma,self.lbar,self.Kids,self.beta,self.chi,self.delta,\
                          self.g_A,self.rho,self.sigma)
            #Loops through each year (across S) and gets decisions for every agent
            #in the next year
            else:
                for s in xrange(self.S-1):
                    #Gets consumption for every agents' next year using Equation 3.22
                    cK_matrix[:,s+1,s+1:self.T+s+1] = ((self.beta * \
                        (1-self.MortalityRates[:,s,s:self.T+s]) * \
                        (1 + r_path[s+1:self.T+s+1] - self.delta))**(1/self.sigma)*\
                        cK_matrix[:,s,s:self.T+s])*np.exp(-self.g_A)
                    c_matrix[:,s+1,s+1:self.T+s+1] = cK_matrix[:,s+1,s+1:self.T+s+1]\
                        /Gamma[:,s+1,s+1:self.T+s+1]
                    #Gets assets for every agents' next year using Equation 3.19
                    a_matrix[:,s+1,s+1:self.T+s+1] = ( (we[:,s,s:self.T+s]*\
                        self.lbar[s:self.T+s] + (1 + r_path[s:self.T+s] - self.delta)\
                        *a_matrix[:,s,s:self.T+s] + bqvec_path[:,s,s:self.T+s])-\
                        c_matrix[:,s,s:self.T+s]*(1+self.Kids[:,s,s:self.T+s]\
                        *Gamma[:,s,s:self.T+s]+we[:,s,s:self.T+s]*\
                        (self.chi/we[:,s,s:self.T+s])**(self.rho)) )*np.exp(-self.g_A)
            #Gets assets in the final period of every agents' lifetime
            # s is pinned to S-2 so the slice below targets each agent's last
            # year of life (row -1, one column past the loop's last write).
            s=self.S-2
            a_matrix[:,-1,s+2:self.T+s+2] = ( (we[:,-1,s+1:self.T+s+1]*\
                self.lbar[s+1:self.T+s+1] + (1 + r_path[s+1:self.T+s+1] - self.delta)\
                *a_matrix[:,-2,s+1:self.T+s+1])-c_matrix[:,-1,s+1:self.T+s+1]*\
                (1+self.Kids[:,-1,s+1:self.T+s+1]*Gamma[:,-1,s+1:self.T+s+1]+\
                we[:,-1,s+1:self.T+s+1]*(self.chi/we[:,-1,s+1:self.T+s+1])\
                **(self.rho) ) )*np.exp(-self.g_A)
            return c_matrix, cK_matrix, a_matrix

        #Functions that solve upper-diagonal household decisions in vectors
        def get_lifetime_decisions_Alive(cK0, c_matrix, cK_matrix, a_matrix, w_path,\
                                         r_path, Gamma, bqvec_path):
            """
            Description:
                - Gets household decisions for consumption and assets for each
                  cohort currently alive (except for the oldest cohort, whose
                  household problem is a closed form solved in get_c_cK_a_matrices)
            Inputs:
                - a_matrix   = Array: [I,S+1,T+S], Empty matrix that gets filled in
                               with savings decisions for each cohort currently alive
                - bqvec_path = Array: [I,S,T+S], Transition path for distribution
                               of bequests for each country
                - cK0        = Array: [I*(S-1)], Today's kids consumption for each
                               cohort
                - cK_matrix  = Array: [I,S,T+S], Empty matrix that gets filled with
                               kids consumption decisions for each cohort currently
                               living
                - c_matrix   = Array: [I,S,T+S], Empty matrix that gets filled in
                               with consumption decisions for each cohort currently
                               alive
                - Gamma      = Array: [I,S,T+S], Transition path of shorthand
                               calculation variable Psi (Equation 3.21)
                - r_path     = Array: [T], Transition path for the interest rate
                - w_path     = Array: [I,T], Transition path for the wage rate in
                               each country
            Variables Called from Object:
                - self.MortalityRates = Array: [I,S,T], Mortality rates of each
                                        country for each age cohort and year
                - self.beta           = Scalar: Calculated overall future discount rate
                - self.chi            = Scalar: Leisure preference parameter
                - self.delta          = Scalar: Calculated overall depreciation rate
                - self.g_A            = Scalar: Growth rate of technology
                - self.rho            = Scalar: The intratemporal elasticity of
                                        substitution between consumption and leisure
                - self.sigma          = Scalar: Rate of Time Preference
            Variables Stored in Object:
                - None
            Other Functions Called:
                - None
            Objects in Function:
                - we = Array: [I,S,T+S], Matrix product of w and e
            Outputs:
                - a_matrix  = Array: [I,S+1,T+S], Savings decisions, now including
                              those who are alive in time 0
                - cK_matrix = Array: [I,S,T+S], Kids Consumption decisions, now
                              including those who are alive in time 0
                - c_matrix  = Array: [I,S,T+S], Consumption decisions, now
                              including those who are alive in time 0
            """
            cK_matrix[:,:-1,0] = cK0.reshape(self.I,self.S-1)
            c_matrix[:,:-1,0] = cK_matrix[:,:-1,0]/Gamma[:,:-1,0]
            we = np.einsum("it,ist->ist",w_path,self.e)
            # Each pass of this loop advances every still-living cohort one
            # period along its own lifetime diagonal (t tracks s one-for-one).
            for s in xrange(self.S):
                t = s
                cK_matrix[:,s+1:,t+1] = (self.beta * (1-self.MortalityRates[:,s:-1,t]) *\
                    (1 + r_path[t+1] - self.delta))**(1/self.sigma)* \
                    cK_matrix[:,s:-1,t]*np.exp(-self.g_A)
                c_matrix[:,s+1:,t+1] = cK_matrix[:,s+1:,t+1]/Gamma[:,s+1:,t+1]
                a_matrix[:,s+1:,t+1] = ( (we[:,s:,t]*self.lbar[t] + (1+r_path[t]-self.delta)\
                    *a_matrix[:,s:-1,t] + bqvec_path[:,s:,t])-c_matrix[:,s:,t]*\
                    (1+self.Kids[:,s:,t]*Gamma[:,s:,t]+we[:,s:,t]*\
                    (self.chi/we[:,s:,t])**(self.rho) ) )*np.exp(-self.g_A)
                #Gets assets in the final period of every agents' lifetime
                a_matrix[:,-1,t+2] = ( (we[:,-1,t+1] + (1 + r_path[t+1] - self.delta)\
                    *a_matrix[:,-2,t+1])-c_matrix[:,-1,t+1]*(1+self.Kids[:,-1,t+1]\
                    *Gamma[:,-1,t+1]+we[:,-1,t+1]*(self.chi/we[:,-1,t+1])**(self.rho)))\
                    *np.exp(-self.g_A)
            return c_matrix, cK_matrix, a_matrix

        def Alive_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path,\
                              Gamma, bqvec_path):
            """
            Description: This is essentially the objective function for household
                         decisions. This function is called by the root finder and
                         searches over levels of initial consumption that lead to
                         the agents not having any assets when they die.
            Inputs:
                - a_matrix   = Array: [I,S+1,T+S], Savings decisions for each cohort
                - bqvec_path = Array: [I,S,T+S], Transition path for distribution
                               of bequests for each country
                - cK0_guess  = Array: [I*(S-1)], Guess for initial kids consumption
                               of agents currently alive
                - cK_matrix  = Array: [I,S,T+S], Kids Consumption decisions for
                               each cohort
                - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
                - Gamma      = Array: [I,S,T+S], Transition path of shorthand
                               calculation variable Psi (Equation 3.21)
                - r_path     = Array: [T+S], Transition path for the interest rate
                - w_path     = Array: [I,T+S], Transition path for the wage rate in
                               each country
            Variables Called from Object:
                - None
            Variables Stored in Object:
                - None
            Other Functions Called:
                - get_lifetime_decisions_Alive = Gets consumption and assets
                  decisions for agents currently alive as a function of consumption
                  in the initial period (t=0).
            Objects in Function:
                - a_matrix = Array: [I,S+1,T], Savings decisions for each cohort
                - c_matrix = Array: [I,S,T], Consumption decisions for each cohort
            Outputs:
                - Euler = Array: [S], Remaining assets when each cohort dies off.
                          Must = 0 for the Euler system to correctly solve.
            """
            #Gets the decisions paths for each agent
            # NOTE: this mutates c_matrix/cK_matrix/a_matrix in place, which is
            # how the caller (get_c_cK_a_matrices) receives the solution.
            c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Alive\
                (cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path,\
                Gamma, bqvec_path)
            #Household Eulers are solved when the agents have no assets at
            #the end of their life
            Euler = np.ravel(a_matrix[:,-1,1:self.S])
            #print "Max Euler", max(Euler)
            return Euler

        def Future_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path,\
                               r_path, Gamma, bqvec_path):
            """
            Description: This is essentially the objective function for household
                         decisions. This function is called by the root finder and
                         searches over levels of initial consumption that lead to
                         the agents not having any assets when they die.
            Inputs:
                - a_matrix   = Array: [I,S+1,T+S], Savings decisions for each cohort
                - bqvec_path = Array: [I,S,T+S], Transition path for distribution
                               of bequests for each country
                - cK0_guess  = Array: [I*T], Guess for initial kids consumption of
                               agents born in the future
                - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
                - cK_matrix  = Array: [I,S,T+S], Kids consumption decisions for
                               each cohort
                - Gamma      = Array: [I,S,T+S], Transition path of shorthand
                               calculation variable Psi (Equation 3.21)
                - r_path     = Array: [T], Transition path for the interest rate
                - w_path     = Array: [I,T+S], Transition path for the wage rate in
                               each country
            Variables Called from Object:
                - None
            Variables Stored in Object:
                - None
            Other Functions Called:
                - get_lifetime_decisions_Future = Gets consumption and assets
                  decisions for each agent to be born in the future as a function
                  of each agent's initial consumption (s=0).
            Objects in Function:
                - a_matrix = Array: [I,S+1,T], Savings decisions for each cohort
                - c_matrix = Array: [I,S,T], Consumption decisions for each cohort
            Outputs:
                - Euler = Array: [T], Remaining assets when each cohort dies off.
                          Must = 0 for the Euler system to correctly solve.
            """
            #Gets the decisions paths for each agent
            # NOTE: like Alive_EulerSystem, this fills the matrices in place.
            c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Future\
                (cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma,\
                bqvec_path)
            #Household Eulers are solved when the agents have no assets at the end
            #of their life
            Euler = np.ravel(a_matrix[:,-1,self.S:])
            #print "Max Euler", max(Euler)
            return Euler

        #Checks various household conditions
        def check_household_conditions(w_path, r_path, c_matrix, cK_matrix, a_matrix,\
                                       Gamma, bqvec_path):
            """
            Description:
                - Essentially returns a matrix of residuals of the left and right
                  sides of the Household Euler equations to make sure the system
                  solved correctly. Mostly used for debugging.
            Inputs:
                - a_matrix   = Array: [I,S+1,T+S], Savings decisions for each cohort
                - bqvec_path = Array: [I,S,T+S], Transition path for distribution
                               of bequests for each country
                - cK_matrix  = Array: [I,S,T+S], Kids Consumption decisions for
                               each cohort
                - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
                - Gamma      = Array: [I,S,T+S], Transition path of shorthand
                               calculation variable Psi (Equation 4.22)
                - r_path     = Array: [T], Transition path for the interest rate
                - w_path     = Array: [I,T+S], Transition path for the wage rate in
                               each country
            Variables Called from Object:
                - self.e     = Array: [I,S,T+S], Labor Productivities
                - self.T     = Int: Number of time periods
                - self.beta  = Scalar: Calculated overall future discount rate
                - self.chi   = Scalar: Leisure preference parameter
                - self.delta = Scalar: Calculated overall depreciation rate
                - self.g_A   = Scalar: Growth rate of technology
                - self.rho   = Scalar: The intratemporal elasticity of substitution
                               between consumption and leisure
                - self.sigma = Scalar: Rate of Time Preference
            Variables Stored in Object:
                - None
            Other Functions Called:
                - None
            Objects in Function:
                - we = Array: [I,S,T+S], Matrix product of w and e
            Outputs:
                - Chained_C_Condition       = Array: [I,S-1,T+S-1], Matrix of
                                              residuals in Equation 3.22
                - Household_Euler           = Array: [I,T+S], Matrix of residuals
                                              of the 0 remaining assets equation
                - Modified_Budget_Constraint= Array: [I,S-1,T+S-1], Matrix of
                                              residuals in Equation 3.19
            """
            #Multiplies wages and productivities ahead of time for easy calculations
            #of the first two equations below
            we = np.einsum("it,ist->ist",w_path[:,:self.T-1],self.e[:,:-1,:self.T-1])
            #Disparity between left and right sides of Equation 4.26
            Chained_C_Condition = cK_matrix[:,:-1,:self.T-1]**(-self.sigma)\
                                  - self.beta*(1-self.MortalityRates[:,:-1,:self.T-1])\
                                  *(cK_matrix[:,1:,1:self.T]*np.exp(self.g_A))\
                                  **(-self.sigma)*(1+r_path[1:self.T]-self.delta)
            #Disparity between left and right sides of Equation 4.23
            Modified_Budget_Constraint = c_matrix[:,:-1,:self.T-1]\
                                         - (we*self.lbar[:self.T-1] + \
                                         (1+r_path[:self.T-1]-self.delta)\
                                         *a_matrix[:,:-2,:self.T-1] +\
                                         bqvec_path[:,:-1,:self.T-1]\
                                         - a_matrix[:,1:-1,1:self.T]*np.exp(self.g_A))\
                                         /(1 + self.Kids[:,:-1,:self.T-1]*\
                                         Gamma[:,:-1,:self.T-1] + we*(self.chi/we)**(self.rho))
            #Disparity between left and right sides of Equation 4.25
            Consumption_Ratio = cK_matrix - c_matrix*Gamma
            #Any remaining assets each agent has at the end of its lifetime.
            #Should be 0 if other Eulers are solving correctly
            Household_Euler = a_matrix[:,-1,:]
            return Chained_C_Condition, Modified_Budget_Constraint,\
                   Consumption_Ratio, Household_Euler

        #Gets consumption and assets matrices using a root finder
        def get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers,\
                                Print_caTimepaths):
            """
            Description:
                - Solves for the optimal consumption and assets paths by searching
                  over initial consumptions for agents alive and unborn
            Inputs:
                - bqvec_path        = Array: [I,S,T+S], Transition path for
                                      distribution of bequests for each country
                - Gamma             = Array: [I,S,T+S], Transition path of shorthand
                                      calculation variable Gamma (Equation 4.22)
                - r_path            = Array: [T], Transition path for the interest rate
                - w_path            = Array: [I,T], Transition path for the wage
                                      rate in each country
                - Print_caTimepaths = Boolean: True prints out the timepaths of
                                      consumption and assets. For de-bugging purposes.
                - Print_HH_Eulers   = Boolean: True prints out if all of the
                                      household equations were satisfied or not
            Variables Called from Object:
                - self.a_init         = Array: [I,S], Initial asset distribution
                                        given by User
                - self.cKvec_ss       = Array: [I,S], Steady state Kids consumption
                - self.e              = Array: [I,S,T+S], Labor Productivities
                - self.MortalityRates = Array: [I,S,T+S], Mortality rates of each
                                        country for each age cohort and year
                - self.I              = Int: Number of Countries
                - self.S              = Int: Number of Cohorts
                - self.T              = Int: Number of time periods
                - self.chi            = Scalar: Leisure preference parameter
                - self.delta          = Scalar: Calculated overall depreciation rate
                - self.rho            = Scalar: The intratemporal elasticity of
                                        substitution between consumption and leisure
            Variables Stored in Object:
                - None
            Other Functions Called:
                - Alive_EulerSystem / Future_EulerSystem = Objective functions for
                  households (final assets at death = 0). Must solve for HH
                  conditions to be satisfied
                - get_lifetime_decisions_Alive  = Gets lifetime consumption and
                  assets decisions for agents alive in the initial time period
                - get_lifetime_decisions_Future = Gets lifetime consumption and
                  assets decisions for agents to be born in the future
            Objects in Function:
                - cK0alive_guess            = Array: [I,S-1], Initial guess for kids
                                              consumption in this period for each
                                              agent alive
                - cK0future_guess           = Array: [I,T], Initial guess for
                                              initial kids consumption for each
                                              agent to be born in the future
                - Chained_C_Condition       = Array: [I,S,T+S], Disparity between
                                              left and right sides of Equation 3.22.
                                              Should be all 0s if the household
                                              problem was solved correctly.
                - Household_Euler           = Array: [I,T+S], Leftover assets at the
                                              end of the final period each agent
                                              lives. Should be all 0s if the
                                              household problem was solved correctly
                - Modified_Budget_Constraint= Array: [I,S,T+S], Disparity between
                                              left and right sides of Equation 3.19.
                                              Should be all 0s if the household
                                              problem was solved correctly.
            Outputs:
                - a_matrix[:,:-1,:self.T] = Array: [I,S,T], Assets transition path
                                            for each country and cohort
                - c_matrix[:,:,:self.T]   = Array: [I,S,T], Consumption transition
                                            path for each country and cohort
                - cK_matrix[:,:,:self.T]  = Array: [I,S,T], Kids Consumption
                                            transition path for each country cohort
            """
            #Initializes the consumption and assets matrices
            c_matrix = np.zeros((self.I,self.S,self.T+self.S))
            cK_matrix = np.zeros((self.I,self.S,self.T+self.S))
            a_matrix = np.zeros((self.I,self.S+1,self.T+self.S))
            a_matrix[:,:-1,0] = self.a_init
            #Equation 3.19 for the oldest agent in time t=0. Note that this agent chooses
            #to consume everything so that it has no assets in the following period
            c_matrix[:,self.S-1,0] = (w_path[:,0]*self.e[:,self.S-1,0]*self.lbar[self.S-1]\
                + (1 + r_path[0] - self.delta)*self.a_init[:,self.S-1] + \
                bqvec_path[:,self.S-1,0])/(1+self.Kids[:,-1,0]*Gamma[:,-1,0]+\
                w_path[:,0]*self.e[:,self.S-1,0]*(self.chi/(w_path[:,0]*\
                self.e[:,self.S-1,0]))**(self.rho))
            cK_matrix[:,self.S-1,0] = c_matrix[:,self.S-1,0]*Gamma[:,-1,0]
            #Initial guess for agents currently alive
            cK0alive_guess = np.ones((self.I, self.S-1))*.3
            #Fills in c_matrix and a_matrix with the correct decisions for agents
            #currently alive
            # NOTE: opt.root's return value is deliberately discarded here —
            # the objective (Alive_EulerSystem) fills c/cK/a_matrix in place.
            start=time.time()
            opt.root(Alive_EulerSystem, cK0alive_guess, args=(c_matrix, cK_matrix, a_matrix,\
                w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
            if self.Matrix_Time: print "\nFill time: NEW UPPER USING KRYLOV", time.time()-start
            #Initializes a guess for the first vector for the root finder to use
            cK0future_guess = np.zeros((self.I,self.T))
            for i in range(self.I):
                cK0future_guess[i,:] = np.linspace(cK_matrix[i,1,0], self.cKvec_ss[i,-1],\
                    self.T)
            #Solves for the entire consumption and assets matrices for agents not
            #currently born
            # Same in-place pattern as above: the result object is unused.
            start=time.time()
            opt.root(Future_EulerSystem, cK0future_guess, args=(c_matrix, cK_matrix, a_matrix\
                , w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
            if self.Matrix_Time: print "lower triangle fill time NOW USING KRYLOV",\
                time.time()-start
            #Prints consumption and assets matrices for country 0.
            #NOTE: the output is the transform of the original matrices,
            #so each row is time and each col is cohort
            if Print_caTimepaths:
                print "Consumption Matrix for country 0", str("("+self.I_touse[0]+")")
                print np.round(np.transpose(c_matrix[0,:,:self.T]), decimals=3)
                print "Assets Matrix for country 0", str("("+self.I_touse[0]+")")
                print np.round(np.transpose(a_matrix[0,:,:self.T]), decimals=3)
            #Prints if each set of conditions are satisfied or not
            if Print_HH_Eulers:
                #Gets matrices for the disparities of critical household conditions
                #and constraints
                Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio,\
                    Household_Euler = check_household_conditions(w_path, r_path, c_matrix,\
                    cK_matrix, a_matrix, Gamma, bqvec_path)
                #Checks to see if all of the Eulers are close enough to 0
                print "\nEuler Household satisfied:", \
                    np.isclose(np.max(np.absolute(Household_Euler)), 0),\
                    np.max(np.absolute(Household_Euler))
                print "Equation 4.26 satisfied:", \
                    np.isclose(np.max(np.absolute(Chained_C_Condition)), 0),\
                    np.max(np.absolute(Chained_C_Condition))
                print "Equation 4.23 satisfied:", np.isclose(np.max(\
                    np.absolute(Modified_Budget_Constraint)), 0),\
                    np.max(np.absolute(Modified_Budget_Constraint))
                print "Equation 4.25 satisfied", np.isclose(np.max\
                    (np.absolute(Consumption_Ratio)), 0), \
                    np.max(np.absolute(Consumption_Ratio))
                #print np.round(np.transpose(Household_Euler[0,:]), decimals=8)
                #print np.round(np.transpose(Modified_Budget_Constraint[0,:,:]), decimals=4)
                #print np.round(np.transpose(Consumption_Ratio[0,:,:]), decimals=4)
            #Returns only up until time T and not the vector
            #print c_matrix[0,:,:self.T]
            return c_matrix[:,:,:self.T], cK_matrix[:,:,:self.T], a_matrix[:,:-1,:self.T]

        #GetTPIComponents continues here
        #Equation 3.25, note that this hasn't changed from stage 3 to stage 4
        alphvec=np.ones(self.I)*self.alpha
        w_path = np.einsum("it,i->it",np.einsum("i,t->it",alphvec,1/r_path)**\
            (self.alpha/(1-self.alpha)),(1-self.alpha)*self.A)
        #Equation 4.22
        Gamma = self.get_Gamma(w_path,self.e)
        #Equations 4.25, 4.23
        c_matrix, cK_matrix, a_matrix = \
            get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers,\
            Print_caTimepaths)
        #Equation 4.24
        lhat_path = self.get_lhat(c_matrix, w_path[:,:self.T], self.e[:,:,:self.T])
        #Equation 4.17
        n_path = self.get_n(lhat_path)
        #Equation 4.16
        kd_path = np.sum(a_matrix*self.Nhat[:,:,:self.T],axis=1)
        #Equation 4.18
        y_path = self.get_Y(kd_path,n_path)
        #Equation 4.28
        kf_path = np.outer(self.alpha*self.A, 1/r_path[:self.T])**( 1/(1-self.alpha) )\
            *n_path - kd_path
        return w_path, c_matrix, cK_matrix, a_matrix, kd_path, kf_path, n_path,\
               y_path, lhat_path
    def EulerSystemTPI(self, guess, Print_HH_Eulers, Print_caTimepaths):
        """
        Description:
            - Gives a system of Euler equations that must be satisfied (or = 0)
              for the transition paths to solve.
        Inputs:
            - guess              = Array [(I+1)*T]: Current guess for the
                                   transition paths of bq and r
            - Print_caTimepaths  = Boolean: True prints out the timepaths of
                                   consumption and assets. For de-bugging mostly
            - Print_HH_Eulers    = Boolean: True prints out if all of the household
                                   equations were satisfied or not
        Variables Called from Object:
            - self.MortalityRates   = Array: [I,S,T+S], Mortality rates of each
                                      country for each age cohort and year
            - self.Nhat             = Array: [I,S,T+S], World population share of
                                      each country for each age cohort and year
            - self.FirstDyingAge    = Int: First age where mortality rates affect
                                      agents
            - self.FirstFertilityAge= Int: First age where agents give birth
            - self.I                = Int: Number of Countries
            - self.S                = Int: Number of Cohorts
            - self.T                = Int: Number of time periods
            - self.Timepath_counter = Int: Counter that keeps track of the number
                                      of iterations in solving for the time paths
            - self.IterationsToShow = Set: A set of user inputs of iterations of
                                      TPI graphs to show
        Variables Stored in Object:
            - None (but self.Timepath_counter is incremented as a side effect)
        Other Functions Called:
            - self.GetTPIComponents = Gets the transition paths for all the other
                                      variables in the model as a function of
                                      bqvec_path and r_path
            - self.plot_timepaths   = Takes the current iteration of the timepaths
                                      and plots them into one sheet of graphs
        Objects in Function:
            - a_matrix            = Array: [I,S,T+S], Transition path for assets
                                    holdings in each country
            - alldeadagent_assets = Array: [I,T+S], Assets of all of the agents who
                                    died in each period. Used to get Euler_bq.
            - bqvec_path          = Array: [I,S,T], Transition path for
                                    distribution of bequests for each country
            - cK_matrix           = Array: [I,S,T], Transition path for Kids
                                    consumption in each country
            - c_matrix            = Array: [I,S,T], Transition path for consumption
                                    in each country
            - Euler_bq            = Array: [I,T], Euler equation that must be
                                    satisfied for the model to solve. See Eq. 3.29
            - Euler_kf            = Array: [T], Euler equation that must be
                                    satisfied for the model to solve. See Eq. 3.24
            - kd_path             = Array: [I,T], Transition path for total
                                    domestically-owned capital in each country
            - kf_path             = Array: [I,T], Transition path for foreign
                                    capital in each country
            - lhat_path           = Array: [I,S,T], Transition path for leisure for
                                    each cohort and country
            - n_path              = Array: [I,T], Transition path for total labor
                                    supply in each country
            - r_path              = Array: [T], Transition path for the interest rate
            - w_path              = Array: [I,T], Transition path for the wage rate
                                    in each country
            - y_path              = Array: [I,T], Transition path for output in
                                    each country
        Outputs:
            - Euler_all = Array: [(I+1)*T], Euler_bq and Euler_kf combined to be
                          the same shape as the input guess
        """
        #Current guess for r and bq
        # Row 0 of the reshaped guess is the r path; rows 1..I are the bq paths.
        guess = np.expand_dims(guess, axis=1).reshape((self.I+1,self.T))
        r_path = guess[0,:]
        bq_path = guess[1:,:]
        #Imposes the steady state on the guesses for r and bq for S periods after T
        r_path = np.hstack((r_path, np.ones(self.S)*self.r_ss))
        bq_path = np.column_stack(( bq_path, np.outer(self.bqindiv_ss,np.ones(self.S)) ))
        #Initializes the bequests distribution, which essentially is a copy of bq
        #for each eligibly-aged agent
        bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
        bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = \
            np.einsum("it,s->ist", bq_path, np.ones(self.FirstDyingAge-\
            self.FirstFertilityAge))
        #Gets all the other variables in the model as a function of bq and r
        w_path, c_matrix, cK_matrix, a_matrix, kd_path, \
            kf_path, n_path, y_path, lhat_path = self.GetTPIComponents(bqvec_path,\
            r_path, Print_HH_Eulers, Print_caTimepaths)
        #Sums up all the assets of agents that died in each period
        alldeadagent_assets = np.sum(a_matrix[:,self.FirstDyingAge:,:]*\
            self.MortalityRates[:,self.FirstDyingAge:,:self.T]*\
            self.Nhat[:,self.FirstDyingAge:,:self.T], axis=1)
        #Difference between assets of dead agents and our guess for bequests.
        #See Equation 3.29
        Euler_bq = bq_path[:,:self.T] - alldeadagent_assets/\
            np.sum(self.Nhat[:,self.FirstFertilityAge:self.FirstDyingAge,:self.T],\
            axis=1)
        #All the foreign-held capital must sum to 0. See Equation 3.24
        Euler_kf = np.sum(kf_path,axis=0)
        #Both Euler equations in one vector for the root finder to play nice
        Euler_all = np.append(Euler_bq, Euler_kf)
        #Prints out info for the current iteration
        if self.Iterate:
            print "Iteration:", self.Timepath_counter, "Min Euler:", \
                np.min(np.absolute(Euler_all)), "Mean Euler:", \
                np.mean(np.absolute(Euler_all))\
                , "Max Euler_bq:", np.max(np.absolute(Euler_bq)), "Max Euler_kf",\
                np.max(np.absolute(Euler_kf))
        #Will plot one of the graphs if the user specified outside the class
        if self.Timepath_counter in self.IterationsToShow:
            self.plot_timepaths(SAVE=False, Paths = \
                (r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path,\
                n_path, kd_path, kf_path))
        #Keeps track of the current iteration of solving the transition path for the model
        self.Timepath_counter += 1
        return Euler_all
    def Timepath_optimize(self, Print_HH_Eulers, Print_caTimepaths, Iters_to_show = set([])):
        """
        Solves for the transition path of every variable in the model.

        Guesses paths for the world interest rate r and individual bequests bq,
        then lets opt.fsolve adjust them until the Euler equations checked by
        self.EulerSystemTPI are satisfied.  All other variables are recovered
        from the converged (r, bq) paths via self.GetTPIComponents.

        Inputs:
            - Print_HH_Eulers   = Boolean: True prints whether all of the
                                  household equations were satisfied
            - Print_caTimepaths = Boolean: True prints the timepaths of
                                  consumption and assets (debugging aid)
            - Iters_to_show     = Set: iteration numbers of the transition-path
                                  solver whose paths should be plotted

        Stored on the object afterwards:
            - self.r_path        = Array [T+S]: interest rate path; steady-state
                                   rate imposed for S periods beyond T
            - self.bqindiv_path  = Array [I,T+S]: bequests received per
                                   individual; steady state imposed beyond T
            - self.bqvec_path    = Array [I,S,T+S]: bequests distributed across
                                   the eligible cohorts
            - self.w_path, self.c_matrix, self.cK_matrix, self.a_matrix,
              self.kd_path, self.kf_path, self.n_path, self.y_path,
              self.lhat_path    = remaining aggregates from GetTPIComponents

        Outputs:
            - None
        """
        # NOTE(review): the mutable default ``set([])`` is only safe because it
        # is never mutated here -- confirm no caller mutates self.IterationsToShow.
        #This is a set that will display the plot of the transition paths
        #for all the variables in whatever iterations are in the set
        self.IterationsToShow = Iters_to_show

        #Gets an initial guess for the transition paths
        rpath_guess, bqindiv_path_guess = self.get_initialguesses()

        #Appends the guesses to feed into the opt.fsolve
        guess = np.append(rpath_guess, bqindiv_path_guess)

        #Solves for the correct transition paths
        paths = opt.fsolve(self.EulerSystemTPI, guess, args=(Print_HH_Eulers,\
                Print_caTimepaths))#, method="krylov", tol=1e-8)["x"]

        #Reshapes the output of the opt.fsolve so that the first row is the
        #transition path for r and the second through I rows are the transition
        #paths of bq for each country
        paths = np.expand_dims(paths, axis=1).reshape((self.I+1,self.T))

        #Imposes the steady state for S years beyond time T
        self.r_path = np.hstack((paths[0,:], np.ones(self.S)*self.r_ss))
        self.bqindiv_path = np.column_stack(( paths[1:,:], \
                np.outer(self.bqindiv_ss,np.ones(self.S)) ))

        #Initialize bequests distribution: only cohorts between the first
        #fertility age and the first dying age receive bequests.
        self.bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
        self.bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = \
                np.einsum("it,s->ist", self.bqindiv_path, \
                np.ones(self.FirstDyingAge-self.FirstFertilityAge))

        #Gets the other variables in the model from the converged paths
        self.w_path, self.c_matrix, self.cK_matrix, self.a_matrix, self.kd_path, \
                self.kf_path, self.n_path, self.y_path, self.lhat_path = \
                self.GetTPIComponents(self.bqvec_path, self.r_path, Print_HH_Eulers,\
                Print_caTimepaths)
def plot_timepaths(self, SAVE=False, Paths = None):
"""
Description:
- Take the timepaths and plots them into an image with windows of different graphs
Inputs:
- bq_path = Array:[I,T+S], Given bequests path
- cK_matrix = Array:[I,S,T+S], Given kids consumption matrix
- c_matrix = Array:[I,S,T+S], Given consumption matrix
- kd_path = Array:[I,T+S], Given domestic capital path
- kf_path = Array:[I,T+S], Given foreign capital path
- lhat_path = Array:[I,S,T+S], Given time endowment
- n_path = Array:[I,T+S], Given aggregate labor productivity
- r_path = Array:[T+S], Given interest rate path
- SAVE = Boolean: Switch that determines whether we
save the graphs or simply show it.
Variables Called from Object:
- self.cKmatrix = Array: [I,S], Steady State kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kd_ss = Array: [I], Steady state total capital holdings
for each country
- self.lhat_ss = Array: [I,S], Steady state leisure decision for
each country and cohort
- self.n_ss = Array: [I], Steady state foreign capital in
each country
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.Timepath_counter = Int: Counter that keeps track of the number of
iterations in solving for the time paths
- self.I_touse = List: [I], Roster of countries that are being used
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- name = String: Name of the .png file that will save
the graphs.
- title = String: Overall title of the sheet of graphs
Outputs:
- None
"""
if Paths is None:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path\
=self.r_path, self.bqindiv_path, self.w_path, self.c_matrix, \
self.cK_matrix, self.lhat_path, self.n_path, self.kd_path, self.kf_path
else:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path,\
kd_path, kf_path = Paths
title = str("S = " + str(self.S) + ", T = " + str(self.T) + ", Iter: " + \
str(self.Timepath_counter))
plt.suptitle(title)
ax = plt.subplot(331)
for i in range(self.I):
plt.plot(range(self.S+self.T), r_path)
plt.title("r_path")
#plt.legend(self.I_touse)
ax.set_xticklabels([])
ax = plt.subplot(332)
for i in range(self.I):
plt.plot(range(self.S+self.T), bq_path[i,:])
plt.title("bqvec_path")
ax.set_xticklabels([])
ax = plt.subplot(333)
for i in range(self.I):
plt.plot(range(self.S+self.T), w_path[i,:])
plt.title("w_path")
ax.set_xticklabels([])
ax = plt.subplot(334)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(c_matrix[i,:,:],axis=0),\
np.ones(self.S)*np.sum(self.cvec_ss[i,:]))) )
plt.title("C_path")
ax.set_xticklabels([])
ax = plt.subplot(335)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(cK_matrix[i,:,:],axis=0),\
np.ones(self.S)*np.sum(self.cKvec_ss[i,:]))) )
plt.title("CK_path")
ax.set_xticklabels([])
ax = plt.subplot(336)
for i in range(self.I):
plt.plot( range(self.S+self.T), np.hstack((np.sum(lhat_path[i,:,:],axis=0),\
np.ones(self.S)*np.sum(self.lhat_ss[i,:]))) )
plt.title("Lhat_path")
ax.set_xticklabels([])
plt.subplot(337)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((n_path[i,:],\
np.ones(self.S)*self.n_ss[i])))
plt.xlabel("Year")
plt.title("n_path")
plt.subplot(338)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kd_path[i,:]\
,np.ones(self.S)*self.kd_ss[i])) )
plt.xlabel("Year")
plt.title("kd_path")
plt.subplot(339)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kf_path[i,:]\
,np.ones(self.S)*self.kf_ss[i])))
plt.xlabel("Year")
plt.title("kf_path")
if SAVE:
name= "Graphs/OLGresult_Iter"+str(self.Timepath_counter)+\
"_"+str(self.I)+"_"+str(self.S)+"_"+str(self.sigma)+".png"
plt.savefig(name)
plt.clf()
else:
plt.show()
| mit |
simpeg/simpeg | examples/02-mesh/plot_cahn_hilliard.py | 1 | 4043 | """
Mesh: Operators: Cahn Hilliard
==============================
This example is based on the example in the FiPy_ library.
Please see their documentation for more information about the
Cahn-Hilliard equation.
The "Cahn-Hilliard" equation separates a field \\\\( \\\\phi \\\\)
into 0 and 1 with smooth transitions.
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \left( \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi \\right)
Where \\\\( f \\\\) is the energy function \\\\( f = ( a^2 / 2 )\\\\phi^2(1 - \\\\phi)^2 \\\\)
which drives \\\\( \\\\phi \\\\) towards either 0 or 1, this competes with the term
\\\\(\\\\epsilon^2 \\\\nabla^2 \\\\phi \\\\) which is a diffusion term that creates smooth changes in \\\\( \\\\phi \\\\).
The equation can be factored:
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \psi \\\\
\psi = \\frac{\partial^2 f}{\partial \phi^2} (\phi - \phi^{\\text{old}}) + \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi
Here we will need the derivatives of \\\\( f \\\\):
.. math::
\\frac{\partial f}{\partial \phi} = (a^2/2)2\phi(1-\phi)(1-2\phi)
\\frac{\partial^2 f}{\partial \phi^2} = (a^2/2)2[1-6\phi(1-\phi)]
The implementation below uses backwards Euler in time with an
exponentially increasing time step. The initial \\\\( \\\\phi \\\\)
is a normally distributed field with a standard deviation of 0.1 and
mean of 0.5. The grid is 60x60 and takes a few seconds to solve ~130
times. The results are seen below, and you can see the field separating
as the time increases.
.. _FiPy: http://www.ctcms.nist.gov/fipy/examples/cahnHilliard/generated/examples.cahnHilliard.mesh2DCoupled.html
"""
from __future__ import print_function
from SimPEG import Mesh, Utils, Solver
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True, n=60):
    """Solve the Cahn-Hilliard equation on an n x n TensorMesh.

    Uses backward Euler with an exponentially growing time step; eight
    snapshots of phi are captured at log-spaced times and optionally plotted.

    :param bool plotIt: if True, draw the captured snapshots in a 2x4 grid
    :param int n: number of cells per mesh dimension
    """
    # Fixed seed so the initial random field (and hence the figures) are
    # reproducible.
    np.random.seed(5)

    # Here we are going to rearrange the equations:

    # (phi_ - phi)/dt = A*(d2fdphi2*(phi_ - phi) + dfdphi - L*phi_)
    # (phi_ - phi)/dt = A*(d2fdphi2*phi_ - d2fdphi2*phi + dfdphi - L*phi_)
    # (phi_ - phi)/dt = A*d2fdphi2*phi_ + A*( - d2fdphi2*phi + dfdphi - L*phi_)
    # phi_ - phi = dt*A*d2fdphi2*phi_ + dt*A*(- d2fdphi2*phi + dfdphi - L*phi_)
    # phi_ - dt*A*d2fdphi2 * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
    # (I - dt*A*d2fdphi2) * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
    # (I - dt*A*d2fdphi2) * phi_ = dt*A*dfdphi - dt*A*d2fdphi2*phi - dt*A*L*phi_ + phi
    # (dt*A*d2fdphi2 - I) * phi_ = dt*A*d2fdphi2*phi + dt*A*L*phi_ - phi - dt*A*dfdphi
    # (dt*A*d2fdphi2 - I - dt*A*L) * phi_ = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi

    h = [(0.25, n)]
    M = Mesh.TensorMesh([h, h])

    # Constants
    D = a = epsilon = 1.
    I = Utils.speye(M.nC)

    # Operators: A is the diffusion operator, L the Laplacian term.
    A = D * M.faceDiv * M.cellGrad
    L = epsilon**2 * M.faceDiv * M.cellGrad

    duration = 75
    elapsed = 0.
    dexp = -5  # exponent for the growing time step dt = e^dexp
    # Initial field: normally distributed around 0.5 with scale 0.01.
    phi = np.random.normal(loc=0.5, scale=0.01, size=M.nC)
    ii, jj = 0, 0
    PHIS = []
    # Log-spaced capture times for the eight snapshots.
    capture = np.logspace(-1, np.log10(duration), 8)
    while elapsed < duration:
        dt = min(100, np.exp(dexp))
        elapsed += dt
        dexp += 0.05

        # f'(phi) and f''(phi) of the double-well energy f = (a^2/2)phi^2(1-phi)^2
        dfdphi = a**2 * 2 * phi * (1 - phi) * (1 - 2 * phi)
        d2fdphi2 = Utils.sdiag(a**2 * 2 * (1 - 6 * phi * (1 - phi)))

        # Backward-Euler linear system (see derivation above).
        MAT = (dt*A*d2fdphi2 - I - dt*A*L)
        rhs = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi
        phi = Solver(MAT)*rhs

        # NOTE(review): assumes the final elapsed time never passes more than
        # one capture point per step, so jj stays within bounds -- confirm.
        if elapsed > capture[jj]:
            PHIS += [(elapsed, phi.copy())]
            jj += 1
        if ii % 10 == 0:
            print(ii, elapsed)
        ii += 1

    if plotIt:
        fig, axes = plt.subplots(2, 4, figsize=(14, 6))
        axes = np.array(axes).flatten().tolist()
        for ii, ax in zip(np.linspace(0, len(PHIS)-1, len(axes)), axes):
            ii = int(ii)
            M.plotImage(PHIS[ii][1], ax=ax)
            ax.axis('off')
            ax.set_title('Elapsed Time: {0:4.1f}'.format(PHIS[ii][0]))
if __name__ == '__main__':
    # Run the simulation and display the resulting figure when executed
    # as a script.
    run()
    plt.show()
| mit |
marianotepper/csnmf | csnmf/tests/test_movies.py | 1 | 5418 | """
Copyright (c) 2015, Mariano Tepper, Duke University.
All rights reserved.
This file is part of RCNMF and is under the BSD 3-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-3-Clause
"""
from __future__ import print_function
from operator import mul, sub
import h5py
import timeit
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import cv2
from into import into
from dask.array.into import discover
from dask.array import Array
import math
import csnmf.snmf
from csnmf.third_party import mrnmf
from movies import png_movie_to_hdf5_matrix
def test_movie(hdf_filename, base_output_name, ncols=None, interval=None,
               max_blockshape=(1e5, 100)):
    """Run separable NMF (SPA) on a movie stored as an HDF5 matrix.

    Each column of the matrix is one flattened frame.  The selected
    "representative" columns are rendered as images and the activation of
    each representative over time is plotted; for small problems an
    annotated .avi is also written.

    :param hdf_filename: HDF5 file containing '/data' (pixels x frames)
        and 'img_shape' (frame height/width).
    :param base_output_name: prefix for all output files (.avi/.pdf/.png).
    :param ncols: number of representative columns; defaults to one per
        120 frames.
    :param interval: optional (start, end) frame range to analyze.
    :param max_blockshape: upper bound on the dask block shape.
    """
    # Read only the frame shape eagerly; the pixel data is loaded lazily below.
    f = h5py.File(hdf_filename, 'r')
    img_shape = np.array(f['img_shape'], dtype=np.int)
    f.close()

    # Block sizes: at most max_blockshape, but never larger than the data.
    m = min(max_blockshape[0], reduce(mul, img_shape))
    if interval is not None:
        # -reduce(sub, interval) == interval[1] - interval[0]
        n = min(max_blockshape[1], -reduce(sub, interval))
    else:
        n = max_blockshape[1]
    m = int(m)
    n = int(n)

    data = into(Array, hdf_filename + '::/data', blockshape=(m, n))
    if interval is not None:
        data = data[:, interval[0]:interval[1]]
    data = np.array(data)

    if ncols is None:
        # NOTE(review): relies on Python 2 integer (floor) division -- confirm.
        ncols = data.shape[1] / 120
    print(data.shape, ncols, m, n)

    # Time the compressed separable-NMF factorization.
    t = timeit.default_timer()
    cols, mat_h, error = csnmf.snmf.compute(data, ncols, 'SPA', compress=True)
    t = timeit.default_timer() - t
    print(error)

    # Recompute the residual with a nonnegative least-squares fit.
    data = np.array(data)
    error = mrnmf.nnls_frob(data, cols)[1]

    def argsort(seq):
        # Indices that would sort seq (numpy.argsort for plain sequences).
        return sorted(range(len(seq)), key=seq.__getitem__)

    # Sort the selected columns and permute the activations to match.
    cols_order = argsort(cols)
    cols = sorted(cols)
    mat_h = mat_h[cols_order, :]

    res_dict = {'cols': cols, 'error': error, 'time': t}
    base_str = 'error {error:.4f}; time {time:.2f}; cols {cols}'
    print(base_str.format(**res_dict))

    # NOTE(review): all plotting below uses ``cmap``, which is only defined
    # inside this branch, so the whole visualization section is assumed to
    # live under this condition -- confirm against the upstream file.
    if interval is not None and ncols <= 10:
        # One qualitative color per representative (ColorBrewer Paired).
        colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99',
                  '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a']
        cmap = ListedColormap(colors)

        # Write an .avi where each frame carries a colored tag indicating
        # its dominant representative.
        fourcc = cv2.cv.CV_FOURCC(*'mp4v')
        out = cv2.VideoWriter(base_output_name + '.avi',
                              fourcc, 8.0, (img_shape[1], img_shape[0]), True)
        max_val = np.argmax(mat_h, axis=0)
        for i in range(data.shape[1]):
            img = np.reshape(data[:, i], img_shape) * 255
            img = img.astype(np.uint8)
            norm_idx = float(max_val[i]) / ncols
            # Matplotlib gives RGBA in [0, 1]; OpenCV wants BGR in [0, 255].
            c = map(lambda x: int(x*255), cmap(norm_idx))[::-1]
            cv2.rectangle(img, (img_shape[1]-50, img_shape[0]-50),
                          (img_shape[1], img_shape[0]), c, cv2.cv.CV_FILLED)
            out.write(img)
        out.release()

        # Grid of the representative frames, each framed in its color.
        border_width = 40
        arrangement = int(math.ceil(math.sqrt(ncols)))
        plt.figure()
        for i, c in enumerate(cols):
            img = np.reshape(data[:, c], img_shape)
            norm_idx = float(i) / ncols
            ax = plt.subplot(arrangement, arrangement, i+1,
                             axisbg=cmap(norm_idx))
            ax.imshow(img, aspect='equal', origin='lower',
                      extent=(border_width, img_shape[1] - border_width,
                              border_width, img_shape[0] - border_width))
            ax.imshow(img, alpha=0)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
        plt.tight_layout()
        plt.savefig(base_output_name + '_representatives.pdf', dpi=300)

        # Stacked-bar plot of the (normalized) activation of each
        # representative across the frames.
        mat_h_norm = mat_h / np.sum(mat_h, axis=0)
        plt.figure()
        ax = plt.axes()
        for i in range(ncols):
            bottom = np.sum(mat_h_norm[:i, :], axis=0)
            norm_idx = float(i) / ncols
            ax.bar(range(data.shape[1]), mat_h_norm[i, :], 1,
                   color=cmap(norm_idx),
                   linewidth=0, bottom=bottom)
        ax.set_ylim(0, 1)
        plt.savefig(base_output_name + '_activation.pdf', dpi=300)

        # One standalone png per representative frame.
        for i, c in enumerate(cols):
            img = np.reshape(data[:, c], img_shape)
            plt.figure()
            ax = plt.axes()
            ax.imshow(img)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
            plt.savefig(base_output_name + '_representative_{0}.png'.format(i))
            plt.close()
        plt.close('all')
def process_elephant_dreams(resolution, interval=None, ncols=None,
                            create_matrix=False):
    """Run the movie decomposition test on the Elephant Dreams sequence.

    :param resolution: vertical resolution of the source movie (e.g. 360).
    :param interval: optional (start, end) frame range.
    :param ncols: number of representatives passed through to test_movie.
    :param create_matrix: when True, rebuild the HDF5 matrix from the
        png frames before running the test.
    """
    movie_prefix = '/Volumes/MyBookDuo/movies/ED-{0:d}'.format(resolution)
    hdf_filename = movie_prefix + '.hdf5'
    if create_matrix:
        # Frames 1..15691 of the png dump are packed into one HDF5 matrix.
        png_movie_to_hdf5_matrix(movie_prefix + '-png/', (1, 15692),
                                 hdf_filename)

    output_name = 'elephantDreams_{0:d}p'.format(resolution)
    if interval is not None:
        output_name += '_{0:05d}_{1:05d}'.format(*interval)

    test_movie(hdf_filename, output_name, ncols=ncols, interval=interval)
if __name__ == '__main__':
    # Small windows of the 360p movie with increasing numbers of
    # representatives.
    process_elephant_dreams(360, interval=(600, 720), ncols=6)
    process_elephant_dreams(360, interval=(600, 720), ncols=9)
    process_elephant_dreams(360, interval=(600, 720), ncols=15)
    # These two tests take a LONG time to run
    process_elephant_dreams(360)
    process_elephant_dreams(1080)
    plt.show()
sketchytechky/pyfolio | setup.py | 5 | 2484 | #!/usr/bin/env python
from setuptools import setup
import sys
# Package metadata for pyfolio.
DISTNAME = 'pyfolio'
DESCRIPTION = "pyfolio is a Python library for performance and risk analysis of financial portfolios"
LONG_DESCRIPTION = """pyfolio is a Python library for performance and risk analysis of
financial portfolios developed by `Quantopian Inc`_. It works well with the
`Zipline`_ open source backtesting library.

At the core of pyfolio is a so-called tear sheet that consists of
various individual plots that provide a comprehensive performance
overview of a portfolio.

.. _Quantopian Inc: https://www.quantopian.com
.. _Zipline: http://zipline.io
"""
MAINTAINER = 'Quantopian Inc'
MAINTAINER_EMAIL = 'opensource@quantopian.com'
AUTHOR = 'Quantopian Inc'
AUTHOR_EMAIL = 'opensource@quantopian.com'
URL = "https://github.com/quantopian/pyfolio"
LICENSE = "Apache License, Version 2.0"
VERSION = "0.1"

classifiers = ['Development Status :: 4 - Beta',
               'Programming Language :: Python',
               'Programming Language :: Python :: 2',
               'Programming Language :: Python :: 3',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3.4',
               'License :: OSI Approved :: Apache Software License',
               'Intended Audience :: Science/Research',
               'Topic :: Scientific/Engineering',
               'Topic :: Scientific/Engineering :: Mathematics',
               'Operating System :: OS Independent']

# Runtime dependencies.
install_reqs = [
    'funcsigs>=0.4',
    'matplotlib>=1.4.0',
    'mock>=1.1.2',
    'numpy>=1.9.1',
    'pandas>=0.15.0',
    'pyparsing>=2.0.3',
    'python-dateutil>=2.4.2',
    'pytz>=2014.10',
    'scikit-learn>=0.15.0',
    'scipy>=0.14.0',
    'seaborn>=0.6.0',
    'statsmodels>=0.5.0']

# Optional dependency groups, installable as ``pip install pyfolio[bayesian]``.
extras_reqs = {
    'bayesian': ['pymc3']
}

test_reqs = ['nose>=1.3.7', 'nose-parameterized>=0.5.0', 'runipy>=0.1.3']

if __name__ == "__main__":
    setup(name=DISTNAME,
          version=VERSION,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          # AUTHOR/AUTHOR_EMAIL were defined but never passed to setup().
          author=AUTHOR,
          author_email=AUTHOR_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          long_description=LONG_DESCRIPTION,
          packages=['pyfolio', 'pyfolio.tests'],
          package_data={'pyfolio': ['data/*.*']},
          classifiers=classifiers,
          install_requires=install_reqs,
          # Fix: setuptools expects ``extras_require`` (no trailing "s");
          # the misspelled ``extras_requires`` keyword was silently ignored,
          # so the optional 'bayesian' group was never installable.
          extras_require=extras_reqs,
          tests_require=test_reqs,
          test_suite='nose.collector')
| apache-2.0 |
mjescobar/RF_Estimation | Clustering/helpers/processClusters/processClusters.py | 2 | 4623 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# processClusters.py
#
# Copyright 2015 Monica Otero <monicaot2011@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..','LIB'))
import rfestimationLib as rfe #Some custom functions
import argparse #argument parsing
#import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
from numpy import loadtxt
from numpy import shape
from numpy import histogram
from numpy import amax
from numpy import amin
from numpy import append
from numpy import zeros
from numpy import empty
#Input file format
# 0-19 Timestamps
# aRadius
# bRadius
# angle
# xCoordinate
# yCoordinate
# area
# clusterId
# peakTime
def loadClusterFile(sourceFile):
	"""Read a comma-separated cluster file into a 2-D numpy float array."""
	# loadtxt does all the parsing; every row must have the same column count.
	return loadtxt(sourceFile, delimiter=',')
def main():
	"""Split units into slow/medium/fast groups by peak time (column 27)
	and save an area histogram comparing each group against the total."""
	parser = argparse.ArgumentParser(prog='processClusters.py',
	    description='Plot units from clustering',
	    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
	parser.add_argument('--sourceFile',
	    help='Source file containing the units and its data',
	    type=str, required=True)
	parser.add_argument('--outputFolder',
	    help='Output folder',
	    type=str, required=True)
	args = parser.parse_args()

	#Source file of the units
	sourceFile = args.sourceFile
	if not os.path.exists(sourceFile):
		print ''
		print 'Source file does not exists ' + sourceFile
		print ''
		sys.exit()

	#Output folder for the graphics
	outputFolder = rfe.fixPath(args.outputFolder)
	if not os.path.exists(outputFolder):
		try:
			os.makedirs(outputFolder)
		except:
			# NOTE(review): bare except also hides KeyboardInterrupt/SystemExit;
			# catching OSError would be safer -- confirm before changing.
			print ''
			print 'Unable to create folder ' + outputFolder
			print ''
			sys.exit()

	Units = loadClusterFile(sourceFile)

	#Slow, Medium, Fast ?
	# Column 27 holds the peak time; three equal-width bins define the groups.
	peaks = Units[:,27]
	hist,edges = histogram(peaks,bins=3)
	print 'hist',hist
	print 'edges',edges
	# Only the bin upper edges are used below; the *Minimum variables are
	# kept for readability of the bin boundaries.
	slowMinimum = edges[0]
	slowMaximum = edges[1]
	mediumMinimum = edges[1]
	mediumMaximum = edges[2]
	fastMinimum = edges[2]
	fastMaximum = edges[3]

	# Seed each group with one dummy row (stripped after the loop) so that
	# numpy.append has a correctly shaped array to grow.
	slowUnits = empty([1, 28])
	mediumUnits = empty([1, 28])
	fastUnits = empty([1, 28])
	for unitId in range(len(Units)):
		if Units[unitId,27] < slowMaximum:
			slowUnits = append(slowUnits,Units[unitId].reshape(1, 28), axis=0)
		elif Units[unitId,27] < mediumMaximum:
			mediumUnits = append(mediumUnits,Units[unitId].reshape(1, 28), axis=0)
		else:
			fastUnits = append(fastUnits,Units[unitId].reshape(1, 28), axis=0)
	# Drop the dummy seed rows.
	slowUnits = slowUnits[1:,:]
	mediumUnits = mediumUnits[1:,:]
	fastUnits = fastUnits[1:,:]
	print 'slows',shape(slowUnits)
	print 'mediums',shape(mediumUnits)
	print 'fasts',shape(fastUnits)
	print 'Units',shape(Units)

	# Column 25 holds the receptive-field area.
	areaTotal = Units[:,25]
	areaSlows = slowUnits[:,25]
	areaMediums = mediumUnits[:,25]
	areaFasts = fastUnits[:,25]

	# Overlaid normalized histograms: total vs each speed group.
	plt.hist(areaTotal, bins=2, histtype='stepfilled', normed=True, color='b', alpha=0.2, label='Total')
	plt.hist(areaSlows, bins=2, histtype='stepfilled', normed=True, color='r', alpha=0.4, label='Slows')
	plt.title("Total/Slows")
	plt.xlabel("Area")
	plt.ylabel("Y Value")
	plt.legend()
	plt.savefig(outputFolder+'slows.png', bbox_inches='tight')
	plt.close()

	plt.hist(areaTotal, bins=2, histtype='stepfilled', normed=True, color='b', alpha=0.2, label='Total')
	plt.hist(areaMediums, bins=2, histtype='stepfilled', normed=True, color='r', alpha=0.4, label='Mediums')
	plt.title("Total/Medium")
	plt.xlabel("Area")
	plt.ylabel("Y Value")
	plt.legend()
	plt.savefig(outputFolder+'mediums.png', bbox_inches='tight')
	plt.close()

	plt.hist(areaTotal, bins=2, histtype='stepfilled', normed=True, color='b', alpha=0.2, label='Total')
	plt.hist(areaFasts, bins=2, histtype='stepfilled', normed=True, color='r', alpha=0.4, label='Fasts')
	plt.title("Total/Fast")
	plt.xlabel("Area")
	plt.ylabel("Y Value")
	plt.legend()
	plt.savefig(outputFolder+'fasts.png', bbox_inches='tight')
	plt.close()
	return 0
if __name__ == '__main__':
	# Script entry point.
	main()
| gpl-2.0 |
jaimahajan1997/sympy | sympy/interactive/printing.py | 19 | 16398 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func, **settings):
    """Setup printing in Python interactive session. """
    import sys
    from sympy.core.compatibility import builtins

    def _displayhook(arg):
        """Python's pretty-printer display hook.

        This function was adapted from:
            http://www.python.org/dev/peps/pep-0217/
        """
        # Mirror the builtin displayhook: ``None`` results are not echoed
        # and do not update ``_``.
        if arg is None:
            return
        # Clear ``_`` first so the stringifier cannot observe a stale value.
        builtins._ = None
        print(stringify_func(arg, **settings))
        builtins._ = arg

    sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
                           backcolor, fontsize, latex_mode, print_builtin,
                           latex_printer, **settings):
    """Setup printing in IPython interactive session.

    Registers plain-text, png and latex formatters on the IPython shell
    ``ip`` according to ``use_latex`` (True/'png'/'matplotlib'/'mathjax'/
    False); falls back to the 0.10 result_display hook on old IPython.
    """
    try:
        from IPython.lib.latextools import latex_to_png
    except ImportError:
        pass

    # LaTeX preamble used when rendering pngs through an external latex.
    preamble = "\\documentclass[%s]{article}\n" \
               "\\pagestyle{empty}\n" \
               "\\usepackage{amsmath,amsfonts}%s\\begin{document}"
    if euler:
        addpackages = '\\usepackage{euler}'
    else:
        addpackages = ''
    preamble = preamble % (fontsize, addpackages)

    # dvipng options: image size, resolution, colors and offset.
    imagesize = 'tight'
    offset = "0cm,0cm"
    resolution = 150
    dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
        imagesize, resolution, backcolor, forecolor, offset)
    dvioptions = dvi.split()
    debug("init_printing: DVIOPTIONS:", dvioptions)
    debug("init_printing: PREAMBLE:", preamble)

    latex = latex_printer or default_latex

    def _print_plain(arg, p, cycle):
        """caller for pretty, for use in IPython 0.11"""
        if _can_print_latex(arg):
            p.text(stringify_func(arg))
        else:
            p.text(IPython.lib.pretty.pretty(arg))

    def _preview_wrapper(o):
        # Render latex source ``o`` to png bytes via an external latex.
        exprbuffer = BytesIO()
        try:
            preview(o, output='png', viewer='BytesIO',
                    outputbuffer=exprbuffer, preamble=preamble,
                    dvioptions=dvioptions)
        except Exception as e:
            # IPython swallows exceptions
            debug("png printing:", "_preview_wrapper exception raised:",
                  repr(e))
            raise
        return exprbuffer.getvalue()

    def _matplotlib_wrapper(o):
        # mathtext does not understand certain latex flags, so we try to
        # replace them with suitable subs
        o = o.replace(r'\operatorname', '')
        o = o.replace(r'\overline', r'\bar')
        # mathtext can't render some LaTeX commands. For example, it can't
        # render any LaTeX environments such as array or matrix. So here we
        # ensure that if mathtext fails to render, we return None.
        try:
            return latex_to_png(o)
        except ValueError as e:
            debug('matplotlib exception caught:', repr(e))
            return None

    def _can_print_latex(o):
        """Return True if type o can be printed with LaTeX.

        If o is a container type, this is True if and only if every element of
        o can be printed with LaTeX.
        """
        from sympy import Basic
        from sympy.matrices import MatrixBase
        from sympy.physics.vector import Vector, Dyadic
        if isinstance(o, (list, tuple, set, frozenset)):
            return all(_can_print_latex(i) for i in o)
        elif isinstance(o, dict):
            return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
        elif isinstance(o, bool):
            return False
        # TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
        # to use here, than these explicit imports.
        elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
            return True
        elif isinstance(o, (float, integer_types)) and print_builtin:
            return True
        return False

    def _print_latex_png(o):
        """
        A function that returns a png rendered by an external latex
        distribution, falling back to matplotlib rendering
        """
        if _can_print_latex(o):
            s = latex(o, mode=latex_mode, **settings)
            try:
                return _preview_wrapper(s)
            except RuntimeError as e:
                debug('preview failed with:', repr(e),
                      ' Falling back to matplotlib backend')
                if latex_mode != 'inline':
                    s = latex(o, mode='inline', **settings)
                return _matplotlib_wrapper(s)

    def _print_latex_matplotlib(o):
        """
        A function that returns a png rendered by mathtext
        """
        if _can_print_latex(o):
            s = latex(o, mode='inline', **settings)
            return _matplotlib_wrapper(s)

    def _print_latex_text(o):
        """
        A function to generate the latex representation of sympy expressions.
        """
        if _can_print_latex(o):
            s = latex(o, mode='plain', **settings)
            s = s.replace(r'\dag', r'\dagger')
            s = s.strip('$')
            return '$$%s$$' % s

    def _result_display(self, arg):
        """IPython's pretty-printer display hook, for use in IPython 0.10

        This function was adapted from:
            ipython/IPython/hooks.py:155
        """
        if self.rc.pprint:
            out = stringify_func(arg)

            if '\n' in out:
                # NOTE(review): with print_function imported this bare
                # ``print`` only references the function and prints nothing
                # -- confirm whether a blank line was intended here.
                print

            print(out)
        else:
            print(repr(arg))

    import IPython
    if V(IPython.__version__) >= '0.11':
        from sympy.core.basic import Basic
        from sympy.matrices.matrices import MatrixBase
        from sympy.physics.vector import Vector, Dyadic
        # Types whose display is routed through the sympy printers.
        printable_types = [Basic, MatrixBase, float, tuple, list, set,
                           frozenset, dict, Vector, Dyadic] + list(integer_types)

        plaintext_formatter = ip.display_formatter.formatters['text/plain']

        for cls in printable_types:
            plaintext_formatter.for_type(cls, _print_plain)

        png_formatter = ip.display_formatter.formatters['image/png']
        if use_latex in (True, 'png'):
            debug("init_printing: using png formatter")
            for cls in printable_types:
                png_formatter.for_type(cls, _print_latex_png)
        elif use_latex == 'matplotlib':
            debug("init_printing: using matplotlib formatter")
            for cls in printable_types:
                png_formatter.for_type(cls, _print_latex_matplotlib)
        else:
            debug("init_printing: not using any png formatter")
            for cls in printable_types:
                # Better way to set this, but currently does not work in IPython
                #png_formatter.for_type(cls, None)
                if cls in png_formatter.type_printers:
                    png_formatter.type_printers.pop(cls)

        latex_formatter = ip.display_formatter.formatters['text/latex']
        if use_latex in (True, 'mathjax'):
            debug("init_printing: using mathjax formatter")
            for cls in printable_types:
                latex_formatter.for_type(cls, _print_latex_text)
        else:
            debug("init_printing: not using text/latex formatter")
            for cls in printable_types:
                # Better way to set this, but currently does not work in IPython
                #latex_formatter.for_type(cls, None)
                if cls in latex_formatter.type_printers:
                    latex_formatter.type_printers.pop(cls)
    else:
        # IPython < 0.11: no formatter API, install the legacy display hook.
        ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this points means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
                  use_latex=None, wrap_line=None, num_columns=None,
                  no_global=False, ip=None, euler=False, forecolor='Black',
                  backcolor='Transparent', fontsize='10pt',
                  latex_mode='equation*', print_builtin=True,
                  str_printer=None, pretty_printer=None,
                  latex_printer=None, **settings):
    """
    Initializes pretty-printer depending on the environment.
    Parameters
    ==========
    pretty_print: boolean
        If True, use pretty_print to stringify or the provided pretty
        printer; if False, use sstrrepr to stringify or the provided string
        printer.
    order: string or None
        There are a few different settings for this parameter:
        lex (default), which is lexographic order;
        grlex, which is graded lexographic order;
        grevlex, which is reversed graded lexographic order;
        old, which is used for compatibility reasons and for long expressions;
        None, which sets it to lex.
    use_unicode: boolean or None
        If True, use unicode characters;
        if False, do not use unicode characters.
    use_latex: string, boolean, or None
        If True, use default latex rendering in GUI interfaces (png and
        mathjax);
        if False, do not use latex rendering;
        if 'png', enable latex rendering with an external latex compiler,
        falling back to matplotlib if external compilation fails;
        if 'matplotlib', enable latex rendering with matplotlib;
        if 'mathjax', enable latex text generation, for example MathJax
        rendering in IPython notebook or text rendering in LaTeX documents
    wrap_line: boolean
        If True, lines will wrap at the end; if False, they will not wrap
        but continue as one line. This is only relevant if `pretty_print` is
        True.
    num_columns: int or None
        If int, number of columns before wrapping is set to num_columns; if
        None, number of columns before wrapping is set to terminal width.
        This is only relevant if `pretty_print` is True.
    no_global: boolean
        If True, the settings become system wide;
        if False, use just for this console/session.
    ip: An interactive console
        This can either be an instance of IPython,
        or a class that derives from code.InteractiveConsole.
    euler: boolean, optional, default=False
        Loads the euler package in the LaTeX preamble for handwritten style
        fonts (http://www.ctan.org/pkg/euler).
    forecolor: string, optional, default='Black'
        DVI setting for foreground color.
    backcolor: string, optional, default='Transparent'
        DVI setting for background color.
    fontsize: string, optional, default='10pt'
        A font size to pass to the LaTeX documentclass function in the
        preamble.
    latex_mode: string, optional, default='equation*'
        The mode used in the LaTeX printer. Can be one of:
        {'inline'|'plain'|'equation'|'equation*'}.
    print_builtin: boolean, optional, default=True
        If true then floats and integers will be printed. If false the
        printer will only print SymPy types.
    str_printer: function, optional, default=None
        A custom string printer function. This should mimic
        sympy.printing.sstrrepr().
    pretty_printer: function, optional, default=None
        A custom pretty printer. This should mimic sympy.printing.pretty().
    latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().
    Examples
    ========
    >>> from sympy.interactive import init_printing
    >>> from sympy import Symbol, sqrt
    >>> from sympy.abc import x, y
    >>> sqrt(5)
    sqrt(5)
    >>> init_printing(pretty_print=True) # doctest: +SKIP
    >>> sqrt(5) # doctest: +SKIP
      ___
    \/ 5
    >>> theta = Symbol('theta') # doctest: +SKIP
    >>> init_printing(use_unicode=True) # doctest: +SKIP
    >>> theta # doctest: +SKIP
    \u03b8
    >>> init_printing(use_unicode=False) # doctest: +SKIP
    >>> theta # doctest: +SKIP
    theta
    >>> init_printing(order='lex') # doctest: +SKIP
    >>> str(y + x + y**2 + x**2) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(order='grlex') # doctest: +SKIP
    >>> str(y + x + y**2 + x**2) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(order='grevlex') # doctest: +SKIP
    >>> str(y * x**2 + x * y**2) # doctest: +SKIP
    x**2*y + x*y**2
    >>> init_printing(order='old') # doctest: +SKIP
    >>> str(x**2 + y**2 + x + y) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(num_columns=10) # doctest: +SKIP
    >>> x**2 + x + y**2 + y # doctest: +SKIP
    x + y +
    x**2 + y**2
    """
    import sys
    from sympy.printing.printer import Printer
    # Choose the stringifier: a caller-supplied printer wins; otherwise fall
    # back to sympy's pretty printer (pretty_print=True) or sstrrepr.
    if pretty_print:
        if pretty_printer is not None:
            stringify_func = pretty_printer
        else:
            from sympy.printing import pretty as stringify_func
    else:
        if str_printer is not None:
            stringify_func = str_printer
        else:
            from sympy.printing import sstrrepr as stringify_func
    # Even if ip is not passed, double check that not in IPython shell
    # (get_ipython() only exists as a builtin inside an IPython session).
    in_ipython = False
    if ip is None:
        try:
            ip = get_ipython()
        except NameError:
            pass
        else:
            in_ipython = (ip is not None)
    if ip and not in_ipython:
        in_ipython = _is_ipython(ip)
    if in_ipython and pretty_print:
        try:
            import IPython
            # IPython 1.0 deprecates the frontend module, so we import directly
            # from the terminal module to prevent a deprecation message from being
            # shown.
            if V(IPython.__version__) >= '1.0':
                from IPython.terminal.interactiveshell import TerminalInteractiveShell
            else:
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
            from code import InteractiveConsole
        except ImportError:
            pass
        else:
            # This will be True if we are in the qtconsole or notebook
            # (i.e. not a plain terminal shell), where rich output is safe.
            if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
                    and 'ipython-console' not in ''.join(sys.argv):
                if use_unicode is None:
                    debug("init_printing: Setting use_unicode to True")
                    use_unicode = True
                if use_latex is None:
                    debug("init_printing: Setting use_latex to True")
                    use_latex = True
    if not no_global:
        # Make the chosen options the process-wide defaults for all printers.
        Printer.set_global_settings(order=order, use_unicode=use_unicode,
                                    wrap_line=wrap_line, num_columns=num_columns)
    else:
        # Keep the options local to this session by baking them into the
        # stringify function instead of mutating global printer state.
        _stringify_func = stringify_func
        if pretty_print:
            stringify_func = lambda expr: \
                _stringify_func(expr, order=order,
                                use_unicode=use_unicode,
                                wrap_line=wrap_line,
                                num_columns=num_columns)
        else:
            stringify_func = lambda expr: _stringify_func(expr, order=order)
    if in_ipython:
        # 'mode' cannot be honoured under IPython: its display machinery
        # controls the surrounding math environment, so drop it with a debug
        # note.  NOTE(review): the message below is missing a space between
        # "internals" and "of".
        mode_in_settings = settings.pop("mode", None)
        if mode_in_settings:
            debug("init_printing: Mode is not able to be set due to internals"
                  "of IPython printing")
        _init_ipython_printing(ip, stringify_func, use_latex, euler,
                               forecolor, backcolor, fontsize, latex_mode,
                               print_builtin, latex_printer, **settings)
    else:
        _init_python_printing(stringify_func, **settings)
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
# Registries of matrix generators / transformer classes so each check below
# can be run against both the sparse and the dense implementation.
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
                           all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
    """Return a (dense ndarray, CSR matrix) pair of the same random data.

    The matrix has Gaussian-distributed values at uniformly drawn
    positions; seeding with 0 makes the result deterministic.
    """
    rng = np.random.RandomState(0)
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    coo = sp.coo_matrix((values, (rows, cols)),
                        shape=(n_samples, n_features))
    return coo.toarray(), coo.tocsr()
def densify(matrix):
    """Return *matrix* as a dense ndarray; dense input passes through."""
    if sp.issparse(matrix):
        return matrix.toarray()
    return matrix
# Shared fixture: a 10 x 1000 matrix with ~1% non-zero Gaussian entries,
# available both dense (`data`) and as CSR (`data_csr`) for the tests below.
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
    """eps must lie in the open interval (0, 1) and n_samples must be > 0."""
    for bad_eps in (1.1, 0.0, -0.1):
        assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, bad_eps)
    assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
    """Shape handling of johnson_lindenstrauss_min_dim.

    Mismatched n_samples/eps shapes must raise ValueError, while
    broadcastable array inputs of matching shape are accepted.
    """
    # The original repeated this exact assertion twice; once is enough.
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  3 * [100], 2 * [0.9])
    # Equal-shaped array arguments must be accepted without error.
    johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
                                  0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
    """Non-positive matrix dimensions are rejected in every combination."""
    for n_components, n_features in [(0, 0), (-1, 1), (1, -1),
                                     (1, 0), (-1, 0)]:
        assert_raises(ValueError, random_matrix, n_components, n_features)
def check_size_generated(random_matrix):
    """The generated matrix is (n_components, n_features), corner sizes included."""
    for shape in [(1, 5), (5, 1), (5, 5), (1, 1)]:
        assert_equal(random_matrix(*shape).shape, shape)
def check_zero_mean_and_unit_norm(random_matrix):
    """A single long projection column must have ~zero mean and ~unit norm.

    All random matrix generators should produce a transformation matrix
    with zero mean and unit norm for each column.
    """
    column = densify(random_matrix(10000, 1, random_state=0))
    assert_array_almost_equal(0, np.mean(column), 3)
    assert_array_almost_equal(1.0, np.linalg.norm(column), 1)
def check_input_with_sparse_random_matrix(random_matrix):
    """A density outside the valid interval (0, 1] must raise ValueError."""
    n_components, n_features = 5, 10
    for bad_density in (-1., 0.0, 1.1):
        assert_raises(ValueError, random_matrix,
                      n_components, n_features, density=bad_density)
def test_basic_property_of_random_matrix():
    # Check basic properties of random matrix generation.
    # This is a nose-style generator test: each yielded (check, arg) pair is
    # collected as its own test case, so the yield structure must be kept.
    for random_matrix in all_random_matrix:
        yield check_input_size_random_matrix, random_matrix
        yield check_size_generated, random_matrix
        yield check_zero_mean_and_unit_norm, random_matrix
    for random_matrix in all_sparse_random_matrix:
        yield check_input_with_sparse_random_matrix, random_matrix
        # A sparse generator pinned to density=1.0 must behave like the
        # dense generators with respect to mean and norm.
        random_matrix_dense = \
            lambda n_components, n_features, random_state: random_matrix(
                n_components, n_features, random_state=random_state,
                density=1.0)
        yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statical properties of Gaussian random matrix
    # Check that the random matrix follow the proper distribution.
    # Let's say that each element of a_{ij} of A is taken from
    # a_ij ~ N(0.0, 1 / n_components).
    #
    n_components = 100
    n_features = 1000
    A = gaussian_random_matrix(n_components, n_features, random_state=0)
    # Sample mean ~ 0 and unbiased sample variance (ddof=1) ~ 1/n_components.
    assert_array_almost_equal(0.0, np.mean(A), 2)
    assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statical properties of sparse random matrix
    n_components = 100
    n_features = 500
    for density in [0.3, 1.]:
        s = 1 / density
        A = sparse_random_matrix(n_components,
                                 n_features,
                                 density=density,
                                 random_state=0)
        A = densify(A)
        # Check possible values
        values = np.unique(A)
        assert_in(np.sqrt(s) / np.sqrt(n_components), values)
        assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
        if density == 1.0:
            # Fully dense: only the two signed magnitudes can occur.
            assert_equal(np.size(values), 2)
        else:
            # Sparse: zeros appear as a third distinct value.
            assert_in(0., values)
            assert_equal(np.size(values), 3)
        # Check that the random matrix follow the proper distribution.
        # Let's say that each element of a_{ij} of A is taken from
        #
        # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
        # - 0 with probability 1 - 1 / s
        # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
        #
        assert_almost_equal(np.mean(A == 0.0),
                            1 - 1 / s, decimal=2)
        assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        # An indicator variable with success probability p has variance
        # p * (1 - p); check all three indicators.
        assert_almost_equal(np.var(A == 0.0, ddof=1),
                            (1 - 1 / s) * 1 / s, decimal=2)
        assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
        assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
    """A density outside (0, 1] must be rejected at fit time."""
    for RandomProjection in all_SparseRandomProjection:
        for bad_density in (1.1, 0, -0.1):
            assert_raises(ValueError,
                          RandomProjection(density=bad_density).fit, data)
def test_random_projection_transformer_invalid_input():
    """Bad constructor arguments surface as ValueError at fit time."""
    for Projection in all_RandomProjection:
        # 'auto' cannot be solved for a single 3-feature sample.
        assert_raises(ValueError,
                      Projection(n_components='auto').fit, [[0, 1, 2]])
        # An explicit n_components must be strictly positive.
        assert_raises(ValueError, Projection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
    """Calling transform() on an unfitted projector raises ValueError."""
    for Projection in all_RandomProjection:
        unfitted = Projection(n_components='auto')
        assert_raises(ValueError, unfitted.transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
    # With eps=0.1 the JL bound requires more output dimensions (5920) than
    # the input has features (100); this must be reported as an error.
    # (This local `data` deliberately shadows the module-level fixture.)
    data, _ = make_sparse_random_data(1000, 100, 1000)
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=0.1)
        expected_msg = (
            'eps=0.100000 and n_samples=1000 lead to a target dimension'
            ' of 5920 which is larger than the original space with'
            ' n_features=100')
        assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
    """Pairwise distance ratios stay within the eps band of the JL lemma."""
    data, _ = make_sparse_random_data(8, 5000, 15000)
    eps = 0.2
    original_distances = euclidean_distances(data, squared=True)
    original_distances = original_distances.ravel()
    non_identical = original_distances != 0.0
    # remove 0 distances to avoid division by 0
    original_distances = original_distances[non_identical]
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
        projected = rp.fit_transform(data)
        projected_distances = euclidean_distances(projected, squared=True)
        projected_distances = projected_distances.ravel()
        # remove 0 distances to avoid division by 0
        projected_distances = projected_distances[non_identical]
        distances_ratio = projected_distances / original_distances
        # check that the automatically tuned values for the density respect the
        # contract for eps: pairwise distances are preserved according to the
        # Johnson-Lindenstrauss lemma
        assert_less(distances_ratio.max(), 1 + eps)
        assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
    """dense_output toggles between ndarray and sparse transform() output."""
    for SparseRandomProjection in all_SparseRandomProjection:
        # when using sparse input, the projected data can be forced to be a
        # dense numpy array
        rp = SparseRandomProjection(n_components=10, dense_output=True,
                                    random_state=0)
        rp.fit(data)
        assert isinstance(rp.transform(data), np.ndarray)
        sparse_data = sp.csr_matrix(data)
        assert isinstance(rp.transform(sparse_data), np.ndarray)
        # the output can be left to a sparse matrix instead
        rp = SparseRandomProjection(n_components=10, dense_output=False,
                                    random_state=0)
        rp = rp.fit(data)
        # output for dense input will stay dense:
        assert isinstance(rp.transform(data), np.ndarray)
        # output for sparse output will be sparse:
        assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
    """n_components='auto' is resolved from the data; fits are reproducible."""
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto',
                              random_state=0,
                              eps=0.5).fit(data)
        # the number of components is adjusted from the shape of the training
        # set
        assert_equal(rp.n_components, 'auto')
        assert_equal(rp.n_components_, 110)
        if RandomProjection in all_SparseRandomProjection:
            assert_equal(rp.density, 'auto')
            assert_almost_equal(rp.density_, 0.03, 2)
        assert_equal(rp.components_.shape, (110, n_features))
        projected_1 = rp.transform(data)
        assert_equal(projected_1.shape, (n_samples, 110))
        # once the RP is 'fitted' the projection is always the same
        projected_2 = rp.transform(data)
        assert_array_equal(projected_1, projected_2)
        # fit transform with same random seed will lead to the same results
        rp2 = RandomProjection(random_state=0, eps=0.5)
        projected_3 = rp2.fit_transform(data)
        assert_array_equal(projected_1, projected_3)
        # Try to transform with an input X of size different from fitted.
        assert_raises(ValueError, rp.transform, data[:, 1:5])
        # it is also possible to fix the number of components and the density
        # level
        if RandomProjection in all_SparseRandomProjection:
            rp = RandomProjection(n_components=100, density=0.001,
                                  random_state=0)
            projected = rp.fit_transform(data)
            assert_equal(projected.shape, (n_samples, 100))
            assert_equal(rp.components_.shape, (100, n_features))
            assert_less(rp.components_.nnz, 115)  # close to 1% density
            assert_less(85, rp.components_.nnz)  # close to 1% density
def test_warning_n_components_greater_than_n_features():
    """Asking for more output dims than input features triggers a warning."""
    n_features = 20
    samples, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
    for Projection in all_RandomProjection:
        oversized = Projection(n_components=n_features + 1)
        assert_warns(DataDimensionalityWarning, oversized.fit, samples)
def test_works_with_sparse_data():
    """Fitting on CSR input must yield the same components as dense input."""
    n_features = 20
    samples, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
    for Projection in all_RandomProjection:
        fitted_dense = Projection(n_components=3, random_state=1).fit(samples)
        fitted_sparse = Projection(
            n_components=3, random_state=1).fit(sp.csr_matrix(samples))
        assert_array_almost_equal(densify(fitted_dense.components_),
                                  densify(fitted_sparse.components_))
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 1 | 6932 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.model_selection import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
# Module-level fixtures shared by all tests below.
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it (removes any ordering of samples by class label)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
    """Check Isolation Forest for various parameter settings."""
    X_train = np.array([[0, 1], [1, 2]])
    X_test = np.array([[2, 1], [1, 1]])
    grid = ParameterGrid({"n_estimators": [3],
                          "max_samples": [0.5, 1.0, 3],
                          "bootstrap": [True, False]})
    # Smoke test: fit/predict must run for every grid point; warnings
    # (e.g. max_samples=3 exceeding the 2 training samples) are ignored.
    with ignore_warnings():
        for params in grid:
            IsolationForest(random_state=rng,
                            **params).fit(X_train).predict(X_test)
def test_iforest_sparse():
    """Check IForest for various parameter settings on sparse input.

    For each grid point, a forest trained on CSC/CSR input must predict
    exactly the same labels as one trained on the dense equivalent.
    """
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "bootstrap": [True, False]})
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in grid:
            # Trained on sparse format
            sparse_classifier = IsolationForest(
                n_estimators=10, random_state=1, **params).fit(X_train_sparse)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_classifier = IsolationForest(
                n_estimators=10, random_state=1, **params).fit(X_train)
            dense_results = dense_classifier.predict(X_test)
            # Both representations must agree.  (The original asserted this
            # twice in a row; one assertion is sufficient.)
            assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
    """Test that it gives proper exception on deficient input."""
    X = iris.data
    # Test max_samples: negative, zero-fraction and >1 fraction all raise.
    assert_raises(ValueError,
                  IsolationForest(max_samples=-1).fit, X)
    assert_raises(ValueError,
                  IsolationForest(max_samples=0.0).fit, X)
    assert_raises(ValueError,
                  IsolationForest(max_samples=2.0).fit, X)
    # The dataset has less than 256 samples, explicitly setting
    # max_samples > n_samples should result in a warning. If not set
    # explicitly there should be no warning
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         IsolationForest(max_samples=1000).fit, X)
    assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
    assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
    # Non-numeric strings and float fractions above 1 are invalid.
    assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
    assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
    """max_depth is recomputed as ceil(log2(n)) when max_samples == n_samples."""
    X = iris.data
    expected_depth = int(np.ceil(np.log2(X.shape[0])))
    for estimator in IsolationForest().fit(X).estimators_:
        assert_equal(estimator.max_depth, expected_depth)
def test_max_samples_attribute():
    """max_samples_ reflects the effective (possibly clipped) sample count."""
    X = iris.data
    clf = IsolationForest().fit(X)
    assert_equal(clf.max_samples_, X.shape[0])
    # An int larger than n_samples is clipped to n_samples, with a warning.
    clf = IsolationForest(max_samples=500)
    assert_warns_message(UserWarning,
                         "max_samples will be set to n_samples for estimation",
                         clf.fit, X)
    assert_equal(clf.max_samples_, X.shape[0])
    # A float max_samples is interpreted as a fraction of n_samples.
    clf = IsolationForest(max_samples=0.4).fit(X)
    assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
    """Check parallel regression."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = IsolationForest(n_jobs=3,
                               random_state=0).fit(X_train)
    # Predictions must not depend on the number of workers used at
    # predict time ...
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)
    # ... nor on the number of workers used at fit time (same seed).
    ensemble = IsolationForest(n_jobs=1,
                               random_state=0).fit(X_train)
    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)
def test_iforest_performance():
    """Isolation Forest must separate uniform outliers from Gaussian inliers."""
    # Generate train/test data.  (The original computed np.r_[X + 2, X - 2]
    # into X_train here and immediately overwrote it; the dead assignment
    # has been removed.)
    rng = check_random_state(2)
    X = 0.3 * rng.randn(120, 2)
    X_train = X[:100]
    # Generate some abnormal novel observations
    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
    X_test = np.r_[X[100:], X_outliers]
    y_test = np.array([0] * 20 + [1] * 20)
    # fit the model
    clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
    # predict scores (the lower, the more normal)
    y_pred = - clf.decision_function(X_test)
    # the outlier ranking must be nearly perfect (ROC AUC above 0.98)
    assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
    # toy sample (the last two samples are outliers)
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
    # Test IsolationForest (the old comment said "LOF" by mistake)
    clf = IsolationForest(random_state=rng, contamination=0.25)
    clf.fit(X)
    decision_func = - clf.decision_function(X)
    pred = clf.predict(X)
    # assert detect outliers: the two planted outliers must score strictly
    # higher than every inlier, and be exactly the points predicted as -1.
    assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
    assert_array_equal(pred, 6 * [1] + 2 * [-1])
| bsd-3-clause |
jkarnows/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
# Weights follow the estimator order (lr, rf, gnb): the GaussianNB
# probabilities count 5 times as much as the other two classifiers.
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
# Tick labels follow the order of `probas`: clf1 (LogisticRegression,
# weight 1), clf2 (RandomForestClassifier, weight 1), clf3 (GaussianNB,
# weight 5).  The original swapped the last two classifiers and their
# weights in these labels.
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'RandomForestClassifier\nweight 1',
                    'GaussianNB\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
krez13/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N     Y     Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
# Colors indexed by the label bitmask (class 1 -> bit 1, class 2 -> bit 2,
# class 3 -> bit 4).  Index 0 ('!') is a placeholder that is never selected
# because allow_unlabeled=False guarantees at least one label per sample.
COLORS = np.array(['!',
                   '#FF3333',  # red
                   '#0198E1',  # blue
                   '#BF5FFF',  # purple
                   '#FCD116',  # yellow
                   '#FF7216',  # orange
                   '#4DBD33',  # green
                   '#87421F'   # brown
                   ])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Draw one random multilabel dataset on *ax*.

    Returns the generating distributions (p_c, p_w_c) so the caller can
    print them alongside the figure.
    """
    X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
                                   n_classes=n_classes, n_labels=n_labels,
                                   length=length, allow_unlabeled=False,
                                   return_distributions=True,
                                   random_state=RANDOM_SEED)
    # Encode each sample's label combination as a bitmask (1, 2, 4) that
    # indexes the COLORS table defined above.
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
                                                    ).sum(axis=1)),
               marker='.')
    # Stars mark the expected sample per single class; their size scales
    # with the probability of selecting that class label.
    ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
               marker='*', linewidth=.5, edgecolor='black',
               s=20 + 1500 * p_c ** 2,
               color=COLORS.take([1, 2, 4]))
    ax.set_xlabel('Feature 0 count')
    return p_c, p_w_c
# Two side-by-side panels highlight the effect of n_labels (1 vs 3).
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
# Echo the generating distributions of the left panel for reference.
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
    print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
manashmndl/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
# Synthetic data: noisy, roughly increasing (logarithmic trend + noise).
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y)  # x needs to be 2d for LinearRegression
###############################################################################
# plot result
# Vertical segments connect each observation to its isotonic fit.
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
AlwaysLearningDeeper/Project | src/Old files/randomGame.py | 2 | 4558 | import gym,time
import numpy as np
import random
from statistics import median, mean
from collections import Counter
import sys
import pickle
from matplotlib import pyplot as plt
# Breakout action space used below: 1 = no-op/fire, 2-3 = right or left.
#1 NO Op, 2-3 Either Right or Left
env = gym.make('Breakout-v0')
env.reset()
# NOTE(review): these huge literals effectively mean "loop forever";
# score_requirement is currently unused anywhere visible in this file.
goal_steps=50000000000000000000000000000000
score_requirement = 50
initial_games = 100000000000000000000000000000000000000000000000000000000000000
def save_object(object, file_name):
    """Serialise *object* with pickle into the file at *file_name*."""
    fh = open(file_name, 'wb')
    try:
        pickle.dump(object, fh)
    finally:
        fh.close()
class Stack:
    """Minimal LIFO stack backed by a Python list (top of stack = list end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return len(self.items) == 0

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
def stateExploration():
    """
    Debug helper: step the module-level `env` up to 1000 times with a fixed
    action (3), rendering every frame, and print observation shapes once at
    frame 50.

    NOTE(review): np.reshape from the (210, 160, 3) Atari frame to
    (160, 210, 3) does not transpose the axes -- it scrambles the pixel
    layout; np.transpose would be needed for a true axis swap. Confirm intent.
    """
    env.reset()
    for _ in range(1000):
        env.render()
        time.sleep(0.1)  # slow the loop so a human can watch the render
        observation, reward, done, info = env.step(3)
        if _ == 50:
            nobs = np.reshape(observation,(160,210,3))
            print(observation.shape)
            print(nobs.shape)
        if done:
            print("Episode finished after {} timesteps".format(_ + 1))
            break
def initial_population():
    """
    Extracts good runs from random games. Code from sentdex
    :return training_data:

    Plays Breakout with uniformly random actions, buffering the last few
    (observation, action) pairs in a Stack; whenever a reward is received the
    buffered frames are appended to `training_data`. Oversized data is pickled
    to disk in numbered 'FileN' dumps via save_object().

    NOTE(review): actions are drawn from {1, 2, 3}, but the one-hot block at
    the bottom only handles values 1 and 0 -- for actions 2/3 `output` is
    never (re)assigned, so the first such append raises NameError (or reuses
    a stale value). Confirm the intended action encoding.
    """
    # [OBS, MOVES]
    training_data = []
    # all scores:
    scores = []
    cc=0  # number of pickle dumps written so far
    # just the scores that met our threshold:
    accepted_scores = []
    # iterate through however many games we want:
    for game in range(initial_games):
        env.reset()
        score = 0
        # moves specifically from this environment:
        game_memory = []
        # previous observation that we saw
        prev_observation = []
        #Episodes of 10 frames
        stack=Stack()
        # for each frame in 200
        for step in range(goal_steps):
            # choose random action (0 or 1)
            #env.render()
            action = random.randrange(1, 4)  # random action in {1, 2, 3}
            # do it!
            observation, reward, done, info = env.step(action)
            # notice that the observation is returned FROM the action
            # so we'll store the previous observation here, pairing
            # the prev observation to the action we'll take.
            if len(prev_observation) > 0:
                game_memory.append([prev_observation, action])
            prev_observation = observation
            score += reward
            stack.push([prev_observation,action])
            if(stack.size()>10):
                # NOTE(review): pop() removes the NEWEST entry, so this keeps
                # the 10 oldest frames rather than a sliding window -- verify.
                stack.pop()
            #print('Game: ' + str(game)+' Frame: ' + str(step) + ' Reward: ' + str(reward))
            if reward==1:
                # a brick was hit: keep the buffered frames as positive examples
                training_data.extend(stack.items)
            if done: break
            #Check if the list has >4gb size
            cuatrogb=1e+9  # NOTE(review): named "4 GB" but set to 1e9 (~1 GB)
            if sys.getsizeof(training_data)>cuatrogb:
                # spill the collected data to disk and start a fresh list
                save_object(training_data,'File'+str(cc))
                print('Size of training data: ' + str(sys.getsizeof(training_data)))
                cc +=1
                training_data= []
                print('Save file')
                #sys.exit(0)
        print('Size of training data: ' + str(sys.getsizeof(training_data)))
        if(cc>50):
            # stop the whole process once 50 dump files have been written
            sys.exit(0)
        # IF our score is higher than our threshold, we'd like to save
        # every move we made
        # NOTE the reinforcement methodology here.
        # all we're doing is reinforcing the score, we're not trying
        # to influence the machine in any way as to HOW that score is
        # reached.
        if score >= score_requirement:
            accepted_scores.append(score)
            for data in game_memory:
                # convert to one-hot (this is the output layer for our neural network)
                if data[1] == 1:
                    output = [0, 1]
                elif data[1] == 0:
                    output = [1, 0]
                # saving our training data
                training_data.append([data[0], output])
        # reset env to play again
        env.reset()
        # save overall scores
        scores.append(score)
    print(training_data)
    # some stats here, to further illustrate the neural network magic!
    print('Average accepted score:', mean(accepted_scores))
    print('Median score for accepted scores:', median(accepted_scores))
    print(Counter(accepted_scores))
    return training_data
pop = initial_population() | mit |
probml/pyprobml | scripts/spam_dtree_size.py | 1 | 5430 |
#Performance of tree ensembles. Based on the email spam example from chapter 10 of "Elements of statistical learning". Code is from Andrey Gaskov's site:
#https://github.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/blob/master/examples/Spam.ipynb
from one_standard_error_rule_model import OneStandardErrorRuleModel
from sklearn import tree
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
from matplotlib import transforms, pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
# omit numpy warnings (don't do it in real work)
np.seterr(divide='ignore', invalid='ignore')
# `np.warnings` was an accidental re-export of the stdlib module and was
# removed in NumPy 1.25; call the stdlib `warnings` module directly.
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# define plots common properties and color constants
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['axes.linewidth'] = 0.5
# Named hex colours reused across the figures below.
ORANGE, BLUE, PURPLE = '#FF8C00', '#0000FF', '#A020F0'
GRAY1, GRAY4, GRAY7 = '#231F20', '#646369', '#929497'
# we will calculate train and test error rates for all models
def error_rate(y_true, y_pred):
    """Misclassification rate: 1 minus the accuracy of *y_pred* vs *y_true*."""
    accuracy = accuracy_score(y_true, y_pred)
    return 1 - accuracy
"""Get data"""
df = pd.read_csv("https://github.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/blob/master/data/Spam.txt?raw=True")
df.head()
# PAGE 301. We coded spam as 1 and email as zero. A test set of size 1536 was
# randomly chosen, leaving 3065 observations in the training set.
target = 'spam'
columns = ['word_freq_make', 'word_freq_address', 'word_freq_all',
'word_freq_3d', 'word_freq_our', 'word_freq_over',
'word_freq_remove', 'word_freq_internet', 'word_freq_order',
'word_freq_mail', 'word_freq_receive', 'word_freq_will',
'word_freq_people', 'word_freq_report', 'word_freq_addresses',
'word_freq_free', 'word_freq_business', 'word_freq_email',
'word_freq_you', 'word_freq_credit', 'word_freq_your',
'word_freq_font', 'word_freq_000', 'word_freq_money',
'word_freq_hp', 'word_freq_hpl', 'word_freq_george',
'word_freq_650', 'word_freq_lab', 'word_freq_labs',
'word_freq_telnet', 'word_freq_857', 'word_freq_data',
'word_freq_415', 'word_freq_85', 'word_freq_technology',
'word_freq_1999', 'word_freq_parts', 'word_freq_pm',
'word_freq_direct', 'word_freq_cs', 'word_freq_meeting',
'word_freq_original', 'word_freq_project', 'word_freq_re',
'word_freq_edu', 'word_freq_table', 'word_freq_conference',
'char_freq_;', 'char_freq_(', 'char_freq_[', 'char_freq_!',
'char_freq_$', 'char_freq_#', 'capital_run_length_average',
'capital_run_length_longest', 'capital_run_length_total']
# let's give columns more compact names
features = ['make', 'address', 'all', '3d', 'our', 'over', 'remove',
'internet', 'order', 'mail', 'receive', 'will', 'people',
'report', 'addresses', 'free', 'business', 'email', 'you',
'credit', 'your', 'font', '000', 'money', 'hp', 'hpl',
'george', '650', 'lab', 'labs', 'telnet', '857', 'data',
'415', '85', 'technology', '1999', 'parts', 'pm', 'direct',
'cs', 'meeting', 'original', 'project', 're', 'edu', 'table',
'conference', 'ch_;', 'ch(', 'ch[', 'ch!', 'ch$', 'ch#',
'CAPAVE', 'CAPMAX', 'CAPTOT']
X, y = df[columns].values, df[target].values
# split by test column value
is_test = df.test.values
X_train, X_test = X[is_test == 0], X[is_test == 1]
y_train, y_test = y[is_test == 0], y[is_test == 1]
#max_leaf_nodes = [2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 18, 21, 26, 30, 33, 37, 42]
# Candidate tree sizes: 10 evenly spaced values in [2, 200].
max_leaf_nodes = [int(x) for x in np.linspace(2,200,10)]
# Select the tree size by 10-fold CV with the one-standard-error rule
# (smallest tree whose CV error is within 1 SE of the minimum).
tree_based_clf = OneStandardErrorRuleModel(
    tree.DecisionTreeClassifier(criterion='entropy', random_state=5),
    'max_leaf_nodes', max_leaf_nodes,
    is_regression=False, random_state=26,
).fit(X_train, y_train)
print(f'Selected max_leaf_nodes: {tree_based_clf.model_.max_leaf_nodes}')
print(f'Test error rate: {tree_based_clf.assess(X_test, y_test)[0]*100:.1f}%')
# calculate test error rate for each parameter value
test_error_rates = [
    tree_based_clf.refit(X_train, y_train, i).assess(X_test, y_test)[0]
    for i in range(len(max_leaf_nodes))]
# PAGE 313. Figure 9.4 shows the 10-fold cross-validation error rate as a
# function of the size of the pruned tree, along with ±2 standard
# errors of the mean, from the ten replications. The test error curve
# is shown in orange.
fig, ax = plt.subplots(figsize=(4.75, 3.15), dpi=150)
# CV curve (blue) with error bars, plus the 1-SE threshold (dotted line).
ax.plot(max_leaf_nodes, tree_based_clf.cv_mean_errors_, c=BLUE, linewidth=0.6)
ax.errorbar(max_leaf_nodes, tree_based_clf.cv_mean_errors_,
            color=BLUE, linestyle='None', marker='o', elinewidth=0.2,
            markersize=1.5, yerr=tree_based_clf.cv_mean_errors_std_,
            ecolor=BLUE, capsize=2)
ax.axhline(y=tree_based_clf.cv_min_error_ + tree_based_clf.cv_min_error_std_,
           c=GRAY1, linewidth=0.6, linestyle=':')
for e in ax.get_yticklabels() + ax.get_xticklabels():
    e.set_fontsize(6)
ax.set_xlabel('Tree size', color=GRAY4, fontsize=7)
ax.set_ylabel('Misclassification Rate', color=GRAY4, fontsize=7)
# Test-error curve (orange) overlaid for comparison.
ax.scatter(max_leaf_nodes, test_error_rates, color=ORANGE,
           s=3, zorder=10)
ax.plot(max_leaf_nodes, test_error_rates, color=ORANGE,
        linewidth=0.6)
_ = ax.set_ylim(-0.02, 0.47)
plt.tight_layout()
| mit |
procoder317/scikit-learn | sklearn/metrics/ranking.py | 44 | 25479 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Area under a curve, computed with the trapezoidal rule.

    General-purpose: given curve coordinates ``x`` and ``y``, integrate
    ``y`` over ``x``.  For the area under an ROC curve see
    :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, sort the points by ``x`` (ties broken by ``y``) before
        integrating, as appropriate for ROC curves.  With ``reorder=False``
        the ``x`` values must be monotonic; a non-increasing ``x`` yields
        the area with positive sign.

    Returns
    -------
    auc : float

    See also
    --------
    roc_auc_score, precision_recall_curve
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    direction = 1
    if reorder:
        # sort primarily on x, breaking ties with y
        sort_idx = np.lexsort((y, x))
        x, y = x[sort_idx], y[sort_idx]
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            if not np.all(dx <= 0):
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
            # x is non-increasing: flip the sign of the integral
            direction = -1
    return direction * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Average precision (AP) from prediction scores.

    AP is the area under the precision-recall curve.  Restricted to binary
    or multilabel-indicator targets.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels (binary label indicators).
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        How per-class scores are combined; ``None`` returns one score per
        class.  See :func:`sklearn.metrics.roc_auc_score` for the meaning of
        each option.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    See also
    --------
    roc_auc_score, precision_recall_curve
    """
    def _binary_ap(y_true, y_score, sample_weight=None):
        # Area under the (recall, precision) curve for one binary problem.
        prec, rec, _ = precision_recall_curve(y_true, y_score,
                                              sample_weight=sample_weight)
        return auc(rec, prec)

    return _average_binary_score(_binary_ap, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Area Under the ROC Curve (AUC) from prediction scores.

    Restricted to binary targets or multilabel targets in label-indicator
    format.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels (binary label indicators).
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        How per-class scores are combined; ``None`` returns one score per
        class.  'micro' pools all label-indicator entries, 'macro' is the
        unweighted mean over labels, 'weighted' weights by label support,
        'samples' averages over instances.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    See also
    --------
    average_precision_score, roc_curve
    """
    def _binary_auc(y_true, y_score, sample_weight=None):
        # AUC is undefined unless both classes are present.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score,
                                sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(
        _binary_auc, y_true, y_score, average,
        sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Shared workhorse of roc_curve and precision_recall_curve: one pass over
    the scores sorted in decreasing order yields cumulative TP/FP counts at
    every distinct threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (array_equal(classes, [0, 1]) or
             array_equal(classes, [-1, 1]) or
             array_equal(classes, [0]) or
             array_equal(classes, [-1]) or
             array_equal(classes, [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values
    # (stable mergesort keeps tied scores in a deterministic order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: positions 0..i hold i+1 samples, so FP = total - TP
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Precision-recall pairs for successive decision thresholds (binary).

    Precision is ``tp / (tp + fp)`` and recall is ``tp / (tp + fn)``,
    evaluated at every distinct threshold of ``probas_pred``.  A final
    (precision=1, recall=0) point is appended, with no corresponding
    threshold, so the curve starts on the x axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        precision[i] is the precision of predictions with
        score >= thresholds[i]; the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values; the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
        Increasing decision thresholds.
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)
    precision = tps / (tps + fps)
    recall = tps / tps[-1]
    # Truncate once full recall is first attained, reverse so recall is
    # decreasing, and append the conventional (1, 0) end point.
    rev = slice(tps.searchsorted(tps[-1]), None, -1)
    return (np.r_[precision[rev], 1],
            np.r_[recall[rev], 0],
            thresholds[rev])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Receiver operating characteristic (ROC) for a binary task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}; otherwise pass
        ``pos_label`` explicitly.
    y_score : array, shape = [n_samples]
        Target scores: probability estimates of the positive class or
        confidence values.
    pos_label : int
        Label considered positive; all others are negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates; fpr[i] is the FPR of predictions
        with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates, defined analogously.
    thresholds : array, shape = [n_thresholds]
        Decreasing decision thresholds; thresholds[0] represents no
        instance being predicted positive and is set to max(y_score) + 1.

    See also
    --------
    roc_auc_score
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
    if tps.size == 0 or fps[0] != 0:
        # Prepend an artificial (0, 0) point so the curve starts at the
        # origin; its threshold sits above every observed score.
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _rate(counts, msg):
        # Normalise cumulative counts to rates; all-NaN when the class is
        # absent (the rate is then meaningless).
        if counts[-1] <= 0:
            warnings.warn(msg, UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _rate(fps, "No negative samples in y_true, "
                     "false positive value should be meaningless")
    tpr = _rate(tps, "No positive samples in y_true, "
                     "true positive value should be meaningless")
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Ranking-based average precision (LRAP) for multilabel data.

    For every ground-truth label of every sample, compute the fraction of
    labels ranked at or above it (by score) that are themselves true, then
    average these ratios over labels and samples.  The score is always
    strictly positive and the best value is 1.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.

    Returns
    -------
    score : float
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Reject badly formatted arrays; a 2-d "binary" target (one label) is
    # the only non-multilabel shape accepted.
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))
    y_true = csr_matrix(y_true)
    # Negate the scores so that small ranks correspond to high raw scores.
    y_score = -y_score
    n_samples, n_labels = y_true.shape
    total = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if relevant.size == 0 or relevant.size == n_labels:
            # All or none of the labels are relevant: the ranking carries no
            # information, so the sample contributes a perfect score.
            total += 1.
            continue
        neg_scores = y_score[i]
        # rank of each relevant label among ALL labels (ties -> max rank)
        rank = rankdata(neg_scores, 'max')[relevant]
        # rank of each relevant label among the RELEVANT labels only
        rank_within_relevant = rankdata(neg_scores[relevant], 'max')
        total += (rank_within_relevant / rank).mean()
    return total / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure for multilabel ranking.

    For each sample, count how many of the top-ranked labels must be taken
    before every true label is covered, then average over samples.  Ties in
    ``y_score`` receive the maximal rank of the tied group.  The best value
    equals the average number of true labels per sample.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, Katakis & Vlahavas (2010), "Mining multi-label data",
           Data Mining and Knowledge Discovery Handbook, pp. 667-685.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Mask away the irrelevant labels, find the lowest score among the
    # relevant ones per sample ...
    relevant_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    min_relevant = relevant_scores.min(axis=1).reshape((-1, 1))
    # ... and count how many labels score at least that high.
    coverage = (y_score >= min_relevant).sum(axis=1)
    coverage = coverage.filled(0)  # samples with no relevant label -> 0
    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    n_samples, n_labels = y_true.shape
    y_true = csr_matrix(y_true)
    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        # (unique_inverse maps each label to its score's rank-bin)
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # counts of true / all labels falling into each score bin
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered paires in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)
    n_positives = count_nonzero(y_true, axis=1)
    # Normalise by (#relevant * #irrelevant); division by zero handled below.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)
    # When there is no positive or no negative labels, those values should
    # be consider as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
tsherwen/AC_tools | AC_tools/AC_time.py | 1 | 16364 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Time processing functions for use with GEOS-Chem/Data analysis
Use help(<name of function>) to get details on a particular function.
Notes
-----
- This module is underdevelopment vestigial/inefficient code is being removed/updated.
- Where external code is used, credit is given.
"""
import logging
import numpy as np
import pandas as pd
import time
import calendar
import datetime as datetime
from datetime import datetime as datetime_
import sys
# Attempt to import ephem if installed
if sys.version_info.major < 3:
try:
import ephem
except ImportError:
print('ephem package not installed')
def get_day_fraction(date):
    """
    Return the fraction of the day elapsed at *date* (0.0 <= f < 1.0).

    Notes
    -----
    - for working with numpy arrays of datetimes, instead of pandas dataframes
    - microseconds are ignored, matching second-level resolution
    """
    elapsed = date.hour * 3600. + date.minute * 60. + date.second
    return elapsed / 86400.
def dt64_2_dt(dt64):
    """
    Convert an iterable of numpy.datetime64 values (nanosecond resolution,
    assumed UTC) to a list of naive datetime.datetime objects.

    Parameters
    -----
    dt64 (iterable of numpy.datetime64): datetimes to convert

    Returns
    -------
    (list of datetime.datetime)
    """
    ns_per_sec = 1e-9  # nanoseconds -> seconds
    converted = []
    for stamp in dt64:
        converted.append(datetime_.utcfromtimestamp(stamp.astype(int) * ns_per_sec))
    return converted
def nonISOdate2ISO(ds):
    """
    Convert non-ISO date strings to ISO format ('YYYY-MM-DD HH:MM:SS').

    Parameters
    -----
    ds (list): list of single-element lists, each holding one date string
        (e.g. [['2010-1-2 3:00:00'], ...])

    Returns
    -------
    (list): same nesting as *ds*, with month/day/hour fields zero-padded

    Notes
    -----
    - Fixes the original implementation, which normalised each string into a
      local variable but never stored it back, so the input was returned
      unchanged.
    """
    import re
    logging.info('nonISOdate2ISO called')
    # date with a single-digit month (e.g. '2010-1-02' or '2010-1-2')
    regex = re.compile('(\d\d\d\d-\d-\d\d)')
    # time with a single-digit hour field (e.g. ' 3:00:00')
    regexII = re.compile('(.*\s\d:.*)')
    ISO_ds = []
    for d in ds:
        d = d[0]
        # swap ' 0:00:00' for ' 00:00:00'
        d = d.replace(' 0:', ' 00:')
        # zero-pad a single-digit hour
        if re.match(regexII, d):
            d = d[:-7] + '0' + d[-7:]
        if len(d) != 19:
            # single digit for both day and month
            if len(d) == 17:
                d = d[:5] + '0' + d[5:7] + '0' + d[7:]
            # single digit for month only
            if re.match(regex, d):
                d = d[:5] + '0' + d[5:]
            # single digit for day only
            if len(d) != 19:
                d = d[:8] + '0' + d[8:]
        ISO_ds.append([d])
    return ISO_ds
def nearest(ts, s):
    """
    Find the value in a presorted list nearest to a given point
    (e.g. the closest timestamp).

    Parameters
    -------
    ts (float, int, timestamp): point to which the nearest value is sought
    s (list): presorted list of objects of the same type to be searched

    Returns
    -------
    (same type as elements of s)

    Notes
    -------
    - Fixed: ``bisect_left`` was used without being imported (NameError).
    - Credit: Raymond Hettinger
    http://stackoverflow.com/questions/8162379/python-locating-the-closest-timestamp
    """
    from bisect import bisect_left
    # Given a presorted list of timestamps: s = sorted(index)
    i = bisect_left(s, ts)
    # The nearest value is either the insertion neighbour on the left or right
    return min(s[max(0, i-1): i+2], key=lambda t: abs(ts - t))
def YYYYMMDD_HHMM_2_datetime(str1=None, str2=None, combined=False,
                             verbose=False, debug=False):
    """
    Mappable converter of strings to datetime.

    Parameters
    -------
    str1 (list): list of strings of times (or combined 'YYYYMMDDHHMM' strings)
    str2 (list): list of strings of dates
    combined (bool): if True, then a single list of strings is provided
    debug (bool): print debugging options to screen

    Returns
    -------
    (list)
    """
    if combined:
        # Each entry already holds 'YYYYMMDDHHMM'; parse then round-trip
        # through mktime/fromtimestamp to get datetime objects
        parsed = [time.strptime(s, '%Y%m%d%H%M') for s in str1]
        return [datetime_.fromtimestamp(time.mktime(t)) for t in parsed]
    # Otherwise combine the separate date and time lists via a DataFrame
    data = np.array([str1, str2])
    if debug:
        print((data.shape, data[:5, :], [type(i) for i in (str1, str2)]))
    df = pd.DataFrame(data=data.T, columns=['YYYYMMDD', 'HHMM'])
    # Convert to datetime and return the resulting index
    df = DF_YYYYMMDD_HHMM_2_dt(df=df)
    return df.index
def add_months(sourcedate, months):
    """
    Incremental increase of datetime by given months

    Parameters
    -------
    sourcedate (datetime.datetime): date to offset
    months (int): number of months to add (may be negative)

    Returns
    -------
    (datetime.datetime) with the day clamped to the target month's length.

    Notes
    -----
    - Fixed: ``month / 12`` is true (float) division under Python 3, which
      made ``year`` a float and broke ``datetime.datetime(year, ...)``;
      floor division is used instead.
    """
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    # Clamp the day to the last valid day of the target month
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return datetime.datetime(year, month, day)
def add_days(sourcedate, days_):
    """Return *sourcedate* shifted forward by ``days_`` days."""
    return sourcedate + datetime.timedelta(days=float(days_))
def add_hrs(sourcedate, hrs_, debug=False):
    """Return *sourcedate* shifted forward by ``hrs_`` hours."""
    if debug:
        print((sourcedate, hrs_))
    return sourcedate + datetime.timedelta(hours=float(hrs_))
def add_minutes(sourcedate, min_, debug=False):
    """Return *sourcedate* shifted forward by ``min_`` minutes."""
    return sourcedate + datetime.timedelta(minutes=float(min_))
def add_secs(sourcedate, secs_, debug=False):
    """Return *sourcedate* shifted forward by ``secs_`` seconds."""
    return sourcedate + datetime.timedelta(seconds=float(secs_))
def secs_in_month(months=None, years=None):
    """
    Get number of seconds in a specific month for a specific year
    (default=2009)
    """
    # Fall back to all twelve months of 2009 when not given
    if not isinstance(months, list):
        months = list(range(1, 13))
    if not isinstance(years, list):
        years = [2009] * len(months)
    # seconds = 60 s * 60 min * 24 hr * (days in that month)
    secs_per_day = 60 * 60 * 24
    totals = [secs_per_day * calendar.monthrange(int(years[i]), int(m))[1]
              for i, m in enumerate(months)]
    return np.array(totals)
def get_dt4run(time_span='year', period=1, startyear=2005, endyear=2005,
               endhour=23, a=None, b=None):
    """
    Make list of datetimes for a given range or between two datetimes

    Parameters
    -------
    a, b (datetime.datetime): dates to create list of dates between (a=first date)
    endhour (int): last hour to use in list of dates
    startyear, endyear (int): first and last year to output list of dates for
    time_span (str): string of time period (e.g. days)
    period (int): periodicity of returned list of dates (1= 1 hour)

    Returns
    -------
    (list)
    """
    # Default start: 1st of February (the data ran from Feb to Feb)
    if a is None:
        a = datetime.datetime(startyear, 2, 1, 0, 0)
    # Pick the end date for the requested span
    if time_span == '3days':
        b = datetime.datetime(endyear, 2, 3, endhour, 0)
    elif time_span == 'week':
        b = datetime.datetime(endyear, 2, 7, endhour, 0)
    elif time_span == 'month':
        b = datetime.datetime(endyear, 3, 1, endhour, 0)
    elif time_span == '6months':
        b = datetime.datetime(endyear, 8, 1, endhour, 0)
    elif time_span == 'year':
        endyear = 2006  # Kludge as Data ran from Feb to Feb
        b = datetime.datetime(endyear, 1, 31, endhour, 0)
    # Hourly intervals between a and b
    return dt_hrs_a2b(a, b)
def dt_hrs_a2b(a, b, period=1, debug=False):
    """
    Returns list of hour spaced datetimes between two given datetimes

    Parameters
    -------
    a, b (datetime.datetime): dates to create list of dates between (a=first date)
    period (int): periodicity of returned list of dates (1= 1 hour)

    Returns
    -------
    (list)
    """
    step = datetime.timedelta(hours=float(period))
    dates = [a]
    if debug:
        print((dates, a, b, period))
    # Keep appending until the last entry reaches (or passes) b
    while dates[-1] < b:
        dates.append(dates[-1] + step)
    if debug:
        print((dates[0], dates[-1]))
    return dates
# def normalise2dailymax(dates, data, debug=False):
# """
# Normalise data to daily maximiun.
#
# ARGUMENTS:
# - list of dates as datetime.datetime objects.
# - list of of
# """
# logging.info('normalise2dailymax called')
# if debug:
# logging.debug([(type(i), i.shape) for i in (data, dates)])
#
# # Get list of unique dates & remove mean from dates
# dates = np.ma.array([datetime.datetime(*i.timetuple()[:3]) for i in dates])
# idates = np.ma.array((sorted(set(dates))))
#
# if debug:
# logging.debug([(np.min(i), np.max(i), np.mean(i)) for i in [data]])
# for s in idates:
# # print s, len(data[np.ma.where( dates == s) ]), np.ma.max(data[np.ma.where( dates == s )] )
# data[np.ma.where(dates == s)] = data[np.ma.where(
# dates == s)] - np.ma.max(data[np.ma.where(dates == s)])
# if debug:
# logging.debug([(np.min(i), np.max(i), np.mean(i)) for i in [data]])
# return data
def time2datetime(dates):
    """Convert a list of time.struct_time objects to datetime.datetime."""
    assert type(dates) == list, 'Please provide a list of times to unc'
    converted = []
    for t in dates:
        converted.append(datetime_.fromtimestamp(time.mktime(t)))
    return converted
def num2month(input=None, reverse=False, rtn_dict=False):
    """
    Convert number (1-12) to abbreviated name of month

    Parameters
    -------
    reverse (bool): invert dictionary if reverse==True.
    rtn_dict (bool): return the entire dictionary instead of a value for a key

    Notes
    -------
    - input is either a 3 character month string or an integer 1=>12
    """
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    d = dict(enumerate(month_names, start=1))
    if reverse:
        # Map abbreviated names back to month numbers
        d = {name: num for num, name in d.items()}
    if rtn_dict:
        return d
    return d[input]
def DF_YYYYMMDD_HHMM_2_dt(df, date_header='YYYYMMDD', time_header='HHMM',
                          rmvars=None, epoch=False):
    """
    Convert times to datetime from time strings of HHMM and YYYYMMDD

    Parameters
    -------
    df (pd.DataFrame): dataframe containing columns of datetimes in string format
    time_header, date_header (str): column titles for time and date (?_header)
    rmvars (list): list of variables to remove from dataframe
    epoch (bool): return the values in terms of epoch (unix) time

    Returns
    -------
    (pd.DataFrame) with a 'Datetime' column; the frame is indexed by it
    unless epoch=True, in which case an integer 'Epoch' column replaces it.

    Notes
    -----
    - Fixed: the results of ``df.drop(i, 1)`` were previously discarded, so
      *rmvars* columns were never actually removed.
    - Fixed: a garbled nested ``logging.debug`` call; logging now uses lazy
      %-style arguments.
    """
    # Zero-pad the time to 4 characters ('930' -> '0930')
    def _pad_time(x):
        return '{:0>4}'.format(int(x))
    # Use mapped function for speed.
    df[time_header] = df[time_header].map(_pad_time)
    # Combine to make datetime.
    # (go via integer for dates, to ensure no floating zeros appear)
    df['Datetime'] = df[date_header].astype(int).astype(str) + \
        df[time_header].astype(str)
    logging.debug('1st 10 dates: %s', df['Datetime'][:10])
    df['Datetime'] = pd.to_datetime(df['Datetime'], format='%Y%m%d%H%M')
    # Remove variables if list provided as "rmvars"
    if isinstance(rmvars, list):
        df = df.drop(columns=rmvars)
    # Convert to Epoch if requested
    if epoch:
        df['Epoch'] = df['Datetime'].map(unix_time).astype('i8')
        del df['Datetime']
    else:
        df.index = df['Datetime']
    return df
def unix_time(dt):
    """
    Convert datetime to Unix time.

    Parameters
    -------
    dt (datetime.datetime): Single datetime object

    Notes
    -------
    - epoch is counted from a reference time of:
    datetime.datetime(1970, 1, 1, 0, 0)
    """
    reference = datetime.datetime.utcfromtimestamp(0)
    delta = dt - reference
    seconds = delta.days * 86400 + delta.seconds
    return seconds + delta.microseconds / 1e6
def dt_days_a2b(a, b, period=1, debug=False):
    """
    Calculate days between two dattime.datetime format dates

    Parameters
    -------
    a, b (datetime.datetime): dates to create list of dates between (a=first date)
    period (int): periodicity of returned list of dates (1= 1 day)

    Returns
    -------
    (list)
    """
    step = datetime.timedelta(days=float(period))
    dates = [a]
    if debug:
        print((dates, a, b, period))
    # Keep appending until the last entry reaches (or passes) b
    while dates[-1] < b:
        dates.append(dates[-1] + step)
    if debug:
        print((dates[0], dates[-1]))
    return dates
def get_nighttime_values(dates=None, data=None, select_nighttime=True,
                         select_daytime=False,
                         daybreak=datetime.datetime(1970, 1, 1, 6),
                         dayend=datetime.datetime(1970, 1, 1, 18)):
    """
    Select the nighttime (or daytime) subset of a timeseries.

    Parameters
    -------
    dates (array-like of datetime.datetime): times of each observation
    data (array-like): observations; first axis aligned with *dates*
    select_nighttime (bool): keep only nighttime values (default)
    select_daytime (bool): keep only daytime values instead
    daybreak, dayend (datetime.datetime): only the .hour fields are used;
        daytime is hours in [daybreak.hour, dayend.hour]

    Returns
    -------
    (np.array, np.array) of the selected data and dates

    Notes
    -----
    - Fixed: removed a leftover debugging ``print`` of the full dataframe.
    """
    # use dataframe to map daytime boolean
    df = pd.DataFrame(np.array(dates))
    df.columns = ['Datetime']

    # function to generate boolean for daytime
    def is_daytime(input, daybreak=daybreak, dayend=dayend):
        """
        Takes datetime.datetime and retruns True (bool) if daytime
        """
        daytime = False
        # after daybreak
        if (input.hour >= daybreak.hour):
            daytime = True
        # ... and after nightfall
        if (input.hour > dayend.hour):
            daytime = False
        return daytime
    df['ind'] = df.index.values
    df['daytime'] = df['Datetime'].map(is_daytime)
    # Just select nighttime or daytime
    if select_nighttime:
        df = df[df['daytime'] == False]
    if select_daytime:  # select daytime
        df = df[df['daytime'] == True]
    # Select just indexed values
    data = np.array(data)[df['ind'].values, ...]
    dates = np.array(dates)[df['ind'].values]
    return data, dates
def get_daily_maximum(dates=None, data=None):
    """
    Calculate the average daily maximum of a timeseries.

    Parameters
    -------
    dates (array-like of datetime.datetime): times of each observation
    data (array-like): observations; first axis aligned with *dates*

    Returns
    -------
    (np.array or scalar) mean over days of the per-day maxima
    (taken along the first axis of *data*).

    Notes
    -----
    - Fixed: removed leftover debugging ``print`` statements.
    """
    # Use dataframe to hold dates and name column datetime
    df = pd.DataFrame(np.array(dates))
    df.columns = ['Datetime']
    # Add column of index numbers to allow for later indexing...
    df['ind'] = df.index.values

    # Add column for days (truncate each datetime to midnight)
    def convert_datetime2days(input):
        return datetime.datetime(*input.timetuple()[:3])
    df['days'] = df['Datetime'].map(convert_datetime2days)
    # - loop days
    daily_max_data = []
    # Make sure data is a numpy array
    data = np.array(data)
    for day in sorted(set(df['days'])):
        # Select data for this day and record its maximum
        a_day_ind = df[df['days'] == day]
        a_day_data = data[a_day_ind['ind'].values, ...]
        daily_max_data += [a_day_data.max(axis=0)]
    # Get average daily maximum
    avg_data = np.array(daily_max_data).mean(axis=0)
    return avg_data
def get_8hr_rolling_mean(df, window=8):
    """
    Get 8 hour rolling mean of pandas dataframe/series.

    Parameters
    -------
    df (pd.DataFrame or pd.Series): data to smooth
    window (int): the window (hrs) over which to calculate mean (default=8 hrs)

    Returns
    -------
    (pd.DataFrame or pd.Series)

    Notes
    -----
    - Fixed: a single-column DataFrame was previously returned un-smoothed
      because the concatenation step only ran for two or more columns
      (``len(dfs) > 1``).
    """
    try:
        columns = df.columns
    except AttributeError:
        # A Series was passed - smooth it directly
        return df.rolling(window=window, center=False).mean()
    # Smooth each column, then reassemble the DataFrame
    smoothed = [df[col].rolling(window=window, center=False).mean()
                for col in columns]
    if not smoothed:
        # Zero-column frame: nothing to smooth
        return df
    return pd.concat(smoothed, axis=1)
def solartime(observer, sun=None):
    """
    Get Solartime for location of 'observer' relative to 'sun'

    Parameters
    -------
    observer (ephem observer object): Location of the observer
    sun (ephem sun object): Which sun to use? (default: our sun)

    Returns
    -------
    (float)

    Notes
    -------
    - Fixed: when *sun* was None, ``ephem.Sun()`` was instantiated but never
      assigned, so ``sun.compute`` raised an AttributeError on None.
    - Credit: J.F. Sebastian
    http://stackoverflow.com/questions/13314626/local-solar-time-function-from-utc-and-longitude
    """
    import ephem
    if sun is None:
        sun = ephem.Sun()
    # Astronomical math - compute the angle between the sun and observer
    sun.compute(observer)
    # sidereal time == ra (right ascension) is the highest point (noon)
    hour_angle = observer.sidereal_time() - sun.ra
    return ephem.hours(hour_angle + ephem.hours('12:00')).norm  # norm for 24h
| mit |
rkube/2dads | src/tests/arakawa_fd/test_arakawa_convergence.py | 1 | 2520 | #!/usr/bin/python
#-*- Encoding: UTF-8 -*-
"""
Load input and output from test_arakawa.cu
Compute {f,g} = -f_y g_x + g_y f_x
Input:
f(x, y) = -sin(2 pi x)^2 sin(2 pi y)^2
f_x = -4 pi (cos 2 pi x)sin(2 pi x) sin(2 pi y)^2
f_y = -4 pi(cos 2 pi y) sin(2 pi y) sin(2 pi x)^2
-> initializes arr1
g(x, y) = sin(pi x) sin(pi y)
g_x = pi cos(pi x) sin(pi y)
g_y = pi sin(pi x) cos(pi y)
-> initializes arr2
{f,g} = 16 pi^2 cos(2 pi x) cos(pi y) [-(cos(2 pi x) + cos(2 pi y))sin (pi x)^2 sin(pi y)^2
-> stored in arr3
"""
import numpy as np
import matplotlib.pyplot as plt
def fin_1(x, y):
    """f(x, y) = -sin(2 pi x)^2 sin(2 pi y)^2 (first input field)."""
    sx = np.sin(2. * np.pi * x)
    sy = np.sin(2. * np.pi * y)
    return -1.0 * sx * sx * sy * sy
def fin2(x, y):
    """g(x, y) = sin(pi x) sin(pi y) (second input field).

    Fixed: ``sin`` was used unqualified, raising a NameError; it is the
    numpy sine (``np.sin``), matching the other field functions here.
    """
    return np.sin(np.pi * x) * np.sin(np.pi * y)
def fout_an(x, y):
    """Analytic Arakawa bracket {f, g} for the two test fields above."""
    pi = np.pi
    prefactor = 16. * pi * pi * np.cos(pi * x) * np.cos(pi * y)
    bracket = np.cos(2. * pi * y) - np.cos(2. * pi * x)
    sin_sq = np.sin(pi * x) * np.sin(pi * x) * np.sin(pi * y) * np.sin(pi * y)
    return prefactor * bracket * sin_sq
# Grid resolutions to test and the RMS (L2) error recorded for each
Nx_arr = np.array([128, 256, 512, 1024], dtype='int')
L2_arr = np.zeros(Nx_arr.shape[0], dtype='float64')

L = 2.0  # domain length

for idx, Nx in enumerate(Nx_arr):
    # Load the numerical and analytic solutions written by test_arakawa.cu
    solnum = np.loadtxt("test_arakawa_solnum_%d_out.dat" % (Nx))[:Nx, :Nx]
    solan = np.loadtxt("test_arakawa_solan_%d_out.dat" % (Nx))[:Nx, :Nx]

    # Cell-centred grid on [-L/2, L/2]
    dx = L / float(Nx)
    xrg = -0.5 * L + (np.arange(Nx) + 0.5) * dx
    yrg = -0.5 * L + (np.arange(Nx) + 0.5) * dx
    xx, yy = np.meshgrid(xrg, yrg)

    # Pointwise residual and its RMS (L2) norm
    res = (solnum - solan)
    L2_arr[idx] = np.sqrt((res * res).sum() / float(res.size))

    maxval = max(solnum.max(), solan.max())
    minval = min(solnum.min(), solan.min())
    cvals = np.linspace(minval, maxval, 32)

    # Line cut at x-index 3*Nx/8; floor division keeps the index an int
    # (a float index raises TypeError under Python 3)
    cut = 3 * Nx // 8
    plt.figure()
    plt.title('Nx = %d' % Nx)
    plt.plot(solnum[:, cut], label='num')
    plt.plot(solan[:, cut], label='an')
    plt.plot(res[:, cut], label='diff')
    plt.legend()

    #plt.figure()
    #plt.title('Nx = %d' % Nx)
    #plt.contourf(solan, cvals)
    #plt.colorbar()
    #plt.title('Analytic solution')

    #plt.figure()
    #plt.title('Nx = %d' % Nx)
    #plt.contourf(solnum, cvals)
    #plt.colorbar()
    #plt.title('Numerical solution')

    title_str = r"result, Nx=%d, max=%e, min=%e, L2=%e" % (Nx, res.max(), res.min(), L2_arr[idx])
    plt.figure()
    plt.contourf(res)
    plt.colorbar()
    plt.title(title_str)

# Convergence plot: L2 error vs resolution on log-log axes
plt.figure()
plt.loglog(Nx_arr, L2_arr, 'o-')
plt.xlabel(r"$N_x$")
plt.ylabel(r"$L_2$")
plt.show()

# End of file test_arakawa_convergence.py
| mit |
eadgarchen/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
    csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
  """Trains, evaluates, and exports a multivariate model.

  Args:
    csv_file_name: Path to a CSV whose first column is time and whose next
      five columns are values (per the CSVReader column_names below).
    export_directory: Where to write the SavedModel; a temporary directory
      is created when None.
    training_steps: Number of training iterations for the estimator.

  Returns:
    A (times, observations) tuple of numpy arrays covering the observed
    data followed by 100 sampled continuation steps.
  """
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=[], num_features=5)
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      # Larger window sizes generally produce a better covariance matrix.
      reader, batch_size=4, window_size=64)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  # Evaluating over the whole dataset leaves `current_state` holding the
  # model state after the last observation, ready for continuation.
  current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  values = [current_state["observed"]]
  times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
  # Export the model so we can do iterative prediction and filtering without
  # reloading model checkpoints.
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_savedmodel(
      export_directory, input_receiver_fn)
  with tf.Graph().as_default():
    numpy.random.seed(1)  # Make the example a bit more deterministic
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      # Sample 100 steps: predict one step ahead, draw a sample from the
      # predicted Gaussian, then filter that sample back in as if observed.
      for _ in range(100):
        current_prediction = (
            tf.contrib.timeseries.saved_model_utils.predict_continuation(
                continue_from=current_state, signatures=signatures,
                session=session, steps=1))
        next_sample = numpy.random.multivariate_normal(
            # Squeeze out the batch and series length dimensions (both 1).
            mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
            cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
        # Update model state so that future predictions are conditional on the
        # value we just sampled.
        filtering_features = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
                tf.contrib.timeseries.FilteringResults.TIMES],
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
                None, None, :]}
        current_state = (
            tf.contrib.timeseries.saved_model_utils.filter_continuation(
                continue_from=current_state,
                session=session,
                signatures=signatures,
                features=filtering_features))
        values.append(next_sample[None, None, :])
        times.append(current_state["times"])
  # Concatenate along the series-length axis, then drop the batch dimension.
  all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
  all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
  return all_times, all_observations
def main(unused_argv):
  """Train, sample 100 continuation steps, and plot the combined series."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  sampled_times, sampled_values = multivariate_train_and_sample()
  # Show where sampling starts on the plot
  pyplot.axvline(1000, linestyle="dotted")
  pyplot.plot(sampled_times, sampled_values)
  pyplot.show()


if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
kyleabeauchamp/HMCNotes | code/old/test_xhmc_respa_amoeba.py | 1 | 1990 | import lb_loader
import simtk.openmm.app as app
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
n_steps = 3000
temperature = 300. * u.kelvin
hydrogenMass = 3.0 * u.amu
system, positions = lb_loader.load_amoeba()
hmc_integrators.guess_force_groups(system, multipole=2)
#system, positions = lb_loader.load_lb(hydrogenMass=hydrogenMass)
#hmc_integrators.guess_force_groups(system, nonbonded=1, fft=1, others=0)
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 0.25 * u.femtoseconds)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1000)
positions = context.getState(getPositions=True).getPositions()
state = context.getState(getEnergy=True)
energy = state.getPotentialEnergy() + state.getKineticEnergy()
energy, state.getPotentialEnergy(), state.getKineticEnergy()
collision_rate = 10000.0 / u.picoseconds
groups = [(0, 4), (1, 2), (2, 1)]
timestep = 1.0 * u.femtoseconds
steps_per_hmc = 10
k_max = 6
integrator = hmc_integrators.XHMCRESPAIntegrator(temperature, steps_per_hmc, timestep, collision_rate, k_max, groups)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(500)
data = integrator.vstep(25)
integrator = hmc_integrators.GHMCRESPA(temperature, steps_per_hmc, timestep, collision_rate, groups)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(500)
data = integrator.vstep(25)
integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc, timestep, collision_rate)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(500)
data = integrator.vstep(25)
| gpl-2.0 |
huobaowangxi/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature before running a SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline

###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))

###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator.
# ANOVA F-test scores rank the features; SelectPercentile keeps the top slice.
transform = feature_selection.SelectPercentile(feature_selection.f_classif)

clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])

###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)

# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# this example targets the pre-0.18 API (use model_selection when porting).
for percentile in percentiles:
    clf.set_params(anova__percentile=percentile)
    # Compute the cross-validation score for this percentile (n_jobs=1)
    this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
    score_means.append(this_scores.mean())
    score_stds.append(this_scores.std())

plt.errorbar(percentiles, score_means, np.array(score_stds))

plt.title(
    'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')

plt.axis('tight')
plt.show()
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/io/tests/test_date_converters.py | 1 | 5055 | from pandas.compat import StringIO, BytesIO
from datetime import date, datetime
import csv
import os
import sys
import re
import nose
from numpy import nan
import numpy as np
from numpy.testing.decorators import slow
from pandas import DataFrame, Series, Index, isnull
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextParser)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
import pandas.io.date_converters as conv
import pandas.util.testing as tm
class TestConverters(tm.TestCase):
    """Tests for the composable date parsers in pandas.io.date_converters."""

    def setUp(self):
        # Component arrays spelling out two timestamps,
        # 2007-01-03 05:07:09 and 2008-02-04 06:08:00.
        self.years = np.array([2007, 2008])
        self.months = np.array([1, 2])
        self.days = np.array([3, 4])
        self.hours = np.array([5, 6])
        self.minutes = np.array([7, 8])
        self.seconds = np.array([9, 0])
        self.dates = np.array(['2007/1/3', '2008/2/4'], dtype=object)
        self.times = np.array(['05:07:09', '06:08:00'], dtype=object)
        self.expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
                                  datetime(2008, 2, 4, 6, 8, 0)])

    def test_parse_date_time(self):
        # Direct call with separate date and time string arrays.
        result = conv.parse_date_time(self.dates, self.times)
        self.assert_((result == self.expected).all())

        # Same converter driven through read_table's parse_dates machinery,
        # merging the 'date' and 'time' columns into one 'date_time' column.
        data = """\
date, time, a, b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
"""
        datecols = {'date_time': [0, 1]}
        df = read_table(StringIO(data), sep=',', header=0,
                        parse_dates=datecols, date_parser=conv.parse_date_time)
        self.assert_('date_time' in df)
        self.assert_(df.date_time.ix[0] == datetime(2001, 1, 5, 10, 0, 0))

        # Multiple date specs drawing on a shared column (smoke test only).
        data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
                "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
                "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
                "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
                "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
                "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
        date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
        df = read_csv(StringIO(data), header=None, parse_dates=date_spec,
                      date_parser=conv.parse_date_time)

    def test_parse_date_fields(self):
        # year/month/day arrays -> datetimes (dates only, midnight implied).
        result = conv.parse_date_fields(self.years, self.months, self.days)
        expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
        self.assert_((result == expected).all())

        data = "year, month, day, a\n 2001 , 01 , 10 , 10.\n 2001 , 02 , 1 , 11."
        datecols = {'ymd': [0, 1, 2]}
        df = read_table(StringIO(data), sep=',', header=0,
                        parse_dates=datecols,
                        date_parser=conv.parse_date_fields)
        self.assert_('ymd' in df)
        self.assert_(df.ymd.ix[0] == datetime(2001, 1, 10))

    def test_datetime_six_col(self):
        # All six component arrays combine into full datetimes.
        result = conv.parse_all_fields(self.years, self.months, self.days,
                                       self.hours, self.minutes, self.seconds)
        self.assert_((result == self.expected).all())

        data = """\
year, month, day, hour, minute, second, a, b
2001, 01, 05, 10, 00, 0, 0.0, 10.
2001, 01, 5, 10, 0, 00, 1., 11.
"""
        datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
        df = read_table(StringIO(data), sep=',', header=0,
                        parse_dates=datecols,
                        date_parser=conv.parse_all_fields)
        self.assert_('ymdHMS' in df)
        self.assert_(df.ymdHMS.ix[0] == datetime(2001, 1, 5, 10, 0, 0))

    def test_datetime_fractional_seconds(self):
        # Fractional seconds must survive as microseconds.
        data = """\
year, month, day, hour, minute, second, a, b
2001, 01, 05, 10, 00, 0.123456, 0.0, 10.
2001, 01, 5, 10, 0, 0.500000, 1., 11.
"""
        datecols = {'ymdHMS': [0, 1, 2, 3, 4, 5]}
        df = read_table(StringIO(data), sep=',', header=0,
                        parse_dates=datecols,
                        date_parser=conv.parse_all_fields)
        self.assert_('ymdHMS' in df)
        self.assert_(df.ymdHMS.ix[0] == datetime(2001, 1, 5, 10, 0, 0,
                                                 microsecond=123456))
        self.assert_(df.ymdHMS.ix[1] == datetime(2001, 1, 5, 10, 0, 0,
                                                 microsecond=500000))

    def test_generic(self):
        # A user-supplied converter (year+month -> first-of-month date).
        data = "year, month, day, a\n 2001, 01, 10, 10.\n 2001, 02, 1, 11."
        datecols = {'ym': [0, 1]}
        dateconverter = lambda y, m: date(year=int(y), month=int(m), day=1)
        df = read_table(StringIO(data), sep=',', header=0,
                        parse_dates=datecols,
                        date_parser=dateconverter)
        self.assert_('ym' in df)
        self.assert_(df.ym.ix[0] == date(2001, 1, 1))


if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
Peratham/tensor-sc | scripts/common_nodes.py | 2 | 1316 | import matplotlib.pyplot as plt
import numpy as np
import sys
data = sys.argv[1]
def num_common(list1, list2):
    """Count elements shared between two lists (inclusion-exclusion)."""
    combined = list1 + list2
    return len(combined) - len(np.unique(combined))
def best_num_common(order1, order2, num_nodes):
    """Best fractional overlap between the ends of two node orderings.

    Compares the first/last num_nodes of each ordering in all four
    combinations and returns the largest overlap fraction.
    """
    def _overlap(a, b):
        # shared count via inclusion-exclusion (same as num_common)
        return len(a) + len(b) - len(np.unique(a + b))

    ends1 = (order1[0:num_nodes], order1[-num_nodes:])
    ends2 = (order2[0:num_nodes], order2[-num_nodes:])
    best = max(_overlap(e1, e2) for e1 in ends1 for e2 in ends2)
    return float(best) / num_nodes
# Load the three node orderings produced upstream for dataset `data`
with open('%s_msc_order.txt' % (data)) as f:
    multi = [int(line) for line in f]
with open('%s_dl_order.txt' % (data)) as f:
    dirlap = [int(line) for line in f]
with open('%s_lap_order.txt' % (data)) as f:
    lap = [int(line) for line in f]


def num_common_curve(order1, order2):
    # Overlap fraction sampled at ~100 prefix/suffix sizes.
    # NOTE(review): `total_nodes / 100` is a float under Python 3 and
    # range() would raise TypeError; use // if targeting Python 3.
    total_nodes = len(multi)
    sizes = range(1, total_nodes, total_nodes / 100)
    return sizes, [best_num_common(order1, order2, i) for i in sizes]


sizes1, common1 = num_common_curve(multi, dirlap)
sizes2, common2 = num_common_curve(multi, lap)

plt.plot(sizes1, common1, 'b*--',
         sizes2, common2, 'r*--')
# NOTE(review): the second curve compares multi vs lap, but the legend
# says 'dirlap / lap' -- confirm the intended labels.
plt.legend(['multi / dirlap',
            'dirlap / lap'])
plt.show()
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.